Diffstat (limited to 'drivers/firmware')
-rw-r--r-- drivers/firmware/Kconfig | 318
-rw-r--r-- drivers/firmware/Makefile | 40
-rw-r--r-- drivers/firmware/arm_ffa/Kconfig | 21
-rw-r--r-- drivers/firmware/arm_ffa/Makefile | 6
-rw-r--r-- drivers/firmware/arm_ffa/bus.c | 232
-rw-r--r-- drivers/firmware/arm_ffa/common.h | 31
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 787
-rw-r--r-- drivers/firmware/arm_ffa/smccc.c | 39
-rw-r--r-- drivers/firmware/arm_scmi/Kconfig | 196
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 27
-rw-r--r-- drivers/firmware/arm_scmi/base.c | 428
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 515
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 619
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 337
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 3054
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 329
-rw-r--r-- drivers/firmware/arm_scmi/msg.c | 111
-rw-r--r-- drivers/firmware/arm_scmi/notify.c | 1712
-rw-r--r-- drivers/firmware/arm_scmi/notify.h | 90
-rw-r--r-- drivers/firmware/arm_scmi/optee.c | 643
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 1148
-rw-r--r-- drivers/firmware/arm_scmi/power.c | 342
-rw-r--r-- drivers/firmware/arm_scmi/powercap.c | 989
-rw-r--r-- drivers/firmware/arm_scmi/protocols.h | 349
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 1450
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.h | 31
-rw-r--r-- drivers/firmware/arm_scmi/reset.c | 356
-rw-r--r-- drivers/firmware/arm_scmi/scmi_pm_domain.c | 153
-rw-r--r-- drivers/firmware/arm_scmi/scmi_power_control.c | 362
-rw-r--r-- drivers/firmware/arm_scmi/sensors.c | 1152
-rw-r--r-- drivers/firmware/arm_scmi/shmem.c | 130
-rw-r--r-- drivers/firmware/arm_scmi/smc.c | 277
-rw-r--r-- drivers/firmware/arm_scmi/system.c | 158
-rw-r--r-- drivers/firmware/arm_scmi/virtio.c | 939
-rw-r--r-- drivers/firmware/arm_scmi/voltage.c | 445
-rw-r--r-- drivers/firmware/arm_scpi.c | 1060
-rw-r--r-- drivers/firmware/arm_sdei.c | 1116
-rw-r--r-- drivers/firmware/broadcom/Kconfig | 32
-rw-r--r-- drivers/firmware/broadcom/Makefile | 4
-rw-r--r-- drivers/firmware/broadcom/bcm47xx_nvram.c | 257
-rw-r--r-- drivers/firmware/broadcom/bcm47xx_sprom.c | 726
-rw-r--r-- drivers/firmware/broadcom/tee_bnxt_fw.c | 286
-rw-r--r-- drivers/firmware/cirrus/Kconfig | 5
-rw-r--r-- drivers/firmware/cirrus/Makefile | 3
-rw-r--r-- drivers/firmware/cirrus/cs_dsp.c | 3344
-rw-r--r-- drivers/firmware/dmi-id.c | 259
-rw-r--r-- drivers/firmware/dmi-sysfs.c | 703
-rw-r--r-- drivers/firmware/dmi_scan.c | 1210
-rw-r--r-- drivers/firmware/edd.c | 780
-rw-r--r-- drivers/firmware/efi/Kconfig | 303
-rw-r--r-- drivers/firmware/efi/Makefile | 44
-rw-r--r-- drivers/firmware/efi/apple-properties.c | 237
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 178
-rw-r--r-- drivers/firmware/efi/capsule-loader.c | 343
-rw-r--r-- drivers/firmware/efi/capsule.c | 315
-rw-r--r-- drivers/firmware/efi/cper-arm.c | 341
-rw-r--r-- drivers/firmware/efi/cper-x86.c | 361
-rw-r--r-- drivers/firmware/efi/cper.c | 689
-rw-r--r-- drivers/firmware/efi/cper_cxl.c | 189
-rw-r--r-- drivers/firmware/efi/cper_cxl.h | 66
-rw-r--r-- drivers/firmware/efi/dev-path-parser.c | 184
-rw-r--r-- drivers/firmware/efi/earlycon.c | 275
-rw-r--r-- drivers/firmware/efi/efi-bgrt.c | 88
-rw-r--r-- drivers/firmware/efi/efi-init.c | 244
-rw-r--r-- drivers/firmware/efi/efi-pstore.c | 269
-rw-r--r-- drivers/firmware/efi/efi.c | 1182
-rw-r--r-- drivers/firmware/efi/efibc.c | 92
-rw-r--r-- drivers/firmware/efi/embedded-firmware.c | 147
-rw-r--r-- drivers/firmware/efi/esrt.c | 431
-rw-r--r-- drivers/firmware/efi/fdtparams.c | 133
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 177
-rw-r--r-- drivers/firmware/efi/libstub/Makefile.zboot | 63
-rw-r--r-- drivers/firmware/efi/libstub/alignedmem.c | 60
-rw-r--r-- drivers/firmware/efi/libstub/arm32-stub.c | 134
-rw-r--r-- drivers/firmware/efi/libstub/arm64-stub.c | 70
-rw-r--r-- drivers/firmware/efi/libstub/arm64.c | 140
-rw-r--r-- drivers/firmware/efi/libstub/bitmap.c | 41
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-entry.c | 78
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 722
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub.c | 282
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 1154
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 379
-rw-r--r-- drivers/firmware/efi/libstub/file.c | 309
-rw-r--r-- drivers/firmware/efi/libstub/find.c | 43
-rw-r--r-- drivers/firmware/efi/libstub/gop.c | 580
-rw-r--r-- drivers/firmware/efi/libstub/intrinsics.c | 48
-rw-r--r-- drivers/firmware/efi/libstub/kaslr.c | 159
-rw-r--r-- drivers/firmware/efi/libstub/loongarch-stub.c | 43
-rw-r--r-- drivers/firmware/efi/libstub/loongarch.c | 80
-rw-r--r-- drivers/firmware/efi/libstub/mem.c | 130
-rw-r--r-- drivers/firmware/efi/libstub/pci.c | 114
-rw-r--r-- drivers/firmware/efi/libstub/printk.c | 154
-rw-r--r-- drivers/firmware/efi/libstub/random.c | 179
-rw-r--r-- drivers/firmware/efi/libstub/randomalloc.c | 134
-rw-r--r-- drivers/firmware/efi/libstub/relocate.c | 165
-rw-r--r-- drivers/firmware/efi/libstub/riscv-stub.c | 58
-rw-r--r-- drivers/firmware/efi/libstub/riscv.c | 98
-rw-r--r-- drivers/firmware/efi/libstub/screen_info.c | 51
-rw-r--r-- drivers/firmware/efi/libstub/secureboot.c | 62
-rw-r--r-- drivers/firmware/efi/libstub/skip_spaces.c | 12
-rw-r--r-- drivers/firmware/efi/libstub/smbios.c | 57
-rw-r--r-- drivers/firmware/efi/libstub/string.c | 204
-rw-r--r-- drivers/firmware/efi/libstub/systable.c | 8
-rw-r--r-- drivers/firmware/efi/libstub/tpm.c | 167
-rw-r--r-- drivers/firmware/efi/libstub/unaccepted_memory.c | 222
-rw-r--r-- drivers/firmware/efi/libstub/vsprintf.c | 564
-rw-r--r-- drivers/firmware/efi/libstub/x86-5lvl.c | 95
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 1003
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.h | 19
-rw-r--r-- drivers/firmware/efi/libstub/zboot-header.S | 159
-rw-r--r-- drivers/firmware/efi/libstub/zboot.c | 147
-rw-r--r-- drivers/firmware/efi/libstub/zboot.lds | 52
-rw-r--r-- drivers/firmware/efi/memattr.c | 184
-rw-r--r-- drivers/firmware/efi/memmap.c | 154
-rw-r--r-- drivers/firmware/efi/mokvar-table.c | 362
-rw-r--r-- drivers/firmware/efi/rci2-table.c | 150
-rw-r--r-- drivers/firmware/efi/reboot.c | 78
-rw-r--r-- drivers/firmware/efi/riscv-runtime.c | 154
-rw-r--r-- drivers/firmware/efi/runtime-wrappers.c | 590
-rw-r--r-- drivers/firmware/efi/sysfb_efi.c | 375
-rw-r--r-- drivers/firmware/efi/test/Makefile | 2
-rw-r--r-- drivers/firmware/efi/test/efi_test.c | 782
-rw-r--r-- drivers/firmware/efi/test/efi_test.h | 124
-rw-r--r-- drivers/firmware/efi/tpm.c | 110
-rw-r--r-- drivers/firmware/efi/unaccepted_memory.c | 203
-rw-r--r-- drivers/firmware/efi/vars.c | 259
-rw-r--r-- drivers/firmware/google/Kconfig | 84
-rw-r--r-- drivers/firmware/google/Makefile | 14
-rw-r--r-- drivers/firmware/google/cbmem.c | 129
-rw-r--r-- drivers/firmware/google/coreboot_table.c | 239
-rw-r--r-- drivers/firmware/google/coreboot_table.h | 114
-rw-r--r-- drivers/firmware/google/framebuffer-coreboot.c | 91
-rw-r--r-- drivers/firmware/google/gsmi.c | 1093
-rw-r--r-- drivers/firmware/google/memconsole-coreboot.c | 110
-rw-r--r-- drivers/firmware/google/memconsole-x86-legacy.c | 157
-rw-r--r-- drivers/firmware/google/memconsole.c | 53
-rw-r--r-- drivers/firmware/google/memconsole.h | 36
-rw-r--r-- drivers/firmware/google/vpd.c | 320
-rw-r--r-- drivers/firmware/google/vpd_decode.c | 98
-rw-r--r-- drivers/firmware/google/vpd_decode.h | 50
-rw-r--r-- drivers/firmware/imx/Kconfig | 30
-rw-r--r-- drivers/firmware/imx/Makefile | 3
-rw-r--r-- drivers/firmware/imx/imx-dsp.c | 191
-rw-r--r-- drivers/firmware/imx/imx-scu-irq.c | 259
-rw-r--r-- drivers/firmware/imx/imx-scu-soc.c | 152
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 366
-rw-r--r-- drivers/firmware/imx/misc.c | 137
-rw-r--r-- drivers/firmware/imx/rm.c | 90
-rw-r--r-- drivers/firmware/iscsi_ibft.c | 906
-rw-r--r-- drivers/firmware/iscsi_ibft_find.c | 100
-rw-r--r-- drivers/firmware/memmap.c | 419
-rw-r--r-- drivers/firmware/meson/Kconfig | 11
-rw-r--r-- drivers/firmware/meson/Makefile | 2
-rw-r--r-- drivers/firmware/meson/meson_sm.c | 339
-rw-r--r-- drivers/firmware/mtk-adsp-ipc.c | 144
-rw-r--r-- drivers/firmware/pcdp.c | 135
-rw-r--r-- drivers/firmware/pcdp.h | 108
-rw-r--r-- drivers/firmware/psci/Kconfig | 14
-rw-r--r-- drivers/firmware/psci/Makefile | 4
-rw-r--r-- drivers/firmware/psci/psci.c | 791
-rw-r--r-- drivers/firmware/psci/psci_checker.c | 492
-rw-r--r-- drivers/firmware/qcom_scm-legacy.c | 246
-rw-r--r-- drivers/firmware/qcom_scm-smc.c | 225
-rw-r--r-- drivers/firmware/qcom_scm.c | 1518
-rw-r--r-- drivers/firmware/qcom_scm.h | 167
-rw-r--r-- drivers/firmware/qemu_fw_cfg.c | 940
-rw-r--r-- drivers/firmware/raspberrypi.c | 414
-rw-r--r-- drivers/firmware/scpi_pm_domain.c | 157
-rw-r--r-- drivers/firmware/smccc/Kconfig | 25
-rw-r--r-- drivers/firmware/smccc/Makefile | 4
-rw-r--r-- drivers/firmware/smccc/kvm_guest.c | 51
-rw-r--r-- drivers/firmware/smccc/smccc.c | 87
-rw-r--r-- drivers/firmware/smccc/soc_id.c | 95
-rw-r--r-- drivers/firmware/stratix10-rsu.c | 817
-rw-r--r-- drivers/firmware/stratix10-svc.c | 1316
-rw-r--r-- drivers/firmware/sysfb.c | 131
-rw-r--r-- drivers/firmware/sysfb_simplefb.c | 166
-rw-r--r-- drivers/firmware/tegra/Kconfig | 27
-rw-r--r-- drivers/firmware/tegra/Makefile | 9
-rw-r--r-- drivers/firmware/tegra/bpmp-debugfs.c | 804
-rw-r--r-- drivers/firmware/tegra/bpmp-private.h | 35
-rw-r--r-- drivers/firmware/tegra/bpmp-tegra186.c | 389
-rw-r--r-- drivers/firmware/tegra/bpmp-tegra210.c | 237
-rw-r--r-- drivers/firmware/tegra/bpmp.c | 911
-rw-r--r-- drivers/firmware/tegra/ivc.c | 721
-rw-r--r-- drivers/firmware/ti_sci.c | 3452
-rw-r--r-- drivers/firmware/ti_sci.h | 1397
-rw-r--r-- drivers/firmware/trusted_foundations.c | 184
-rw-r--r-- drivers/firmware/turris-mox-rwtm.c | 589
-rw-r--r-- drivers/firmware/xilinx/Kconfig | 26
-rw-r--r-- drivers/firmware/xilinx/Makefile | 5
-rw-r--r-- drivers/firmware/xilinx/zynqmp-debug.c | 239
-rw-r--r-- drivers/firmware/xilinx/zynqmp-debug.h | 24
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 2042
194 files changed, 70386 insertions, 0 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
new file mode 100644
index 0000000000..b59e3041fd
--- /dev/null
+++ b/drivers/firmware/Kconfig
@@ -0,0 +1,318 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.rst.
+#
+
+menu "Firmware Drivers"
+
+source "drivers/firmware/arm_scmi/Kconfig"
+
+config ARM_SCPI_PROTOCOL
+ tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MAILBOX
+ help
+ System Control and Power Interface (SCPI) Message Protocol is
+ defined for the purpose of communication between the Application
+ Cores (AP) and the System Control Processor (SCP). The MHU peripheral
+ provides a mechanism for inter-processor communication between SCP
+ and AP.
+
+ SCP controls most of the power management on the Application
+ Processors. It offers control and management of: the core/cluster
+ power states, various power domain DVFS including the core/cluster,
+ certain system clocks configuration, thermal sensors and many
+ others.
+
+ This protocol library provides an interface for all the client drivers
+ making use of the features offered by the SCP.
+
+config ARM_SCPI_POWER_DOMAIN
+ tristate "SCPI power domain driver"
+ depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCPI power domains which can be
+ enabled or disabled via the SCP firmware
+
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ depends on ACPI_APEI_GHES
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
+config EDD
+ tristate "BIOS Enhanced Disk Drive calls determine boot disk"
+ depends on X86
+ help
+ Say Y or M here if you want to enable BIOS Enhanced Disk Drive
+ Services real mode BIOS calls to determine which disk the
+ BIOS tries to boot from. This information is then exported via sysfs.
+
+ This option is experimental and is known to fail to boot on some
+ obscure configurations. Most disk controller BIOS vendors do
+ not yet implement this feature.
+
+config EDD_OFF
+ bool "Sets default behavior for EDD detection to off"
+ depends on EDD
+ default n
+ help
+ Say Y if you want EDD disabled by default, even though it is compiled into the
+ kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
+ using the kernel parameter 'edd={on|skipmbr|off}'.
+
+config FIRMWARE_MEMMAP
+ bool "Add firmware-provided memory map to sysfs" if EXPERT
+ default X86
+ help
+ Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
+ That memory map is used for example by kexec to set up parameter area
+ for the next kernel, but can also be used for debugging purposes.
+
+ See also Documentation/ABI/testing/sysfs-firmware-memmap.
+
+config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+ default y if IA64
+ help
+ If your firmware supplies the PCDP table, and you want to
+ automatically use the primary console device it describes
+ as the Linux console, say Y here.
+
+ If your firmware supplies the HCDP table, and you want to
+ use the first serial port it describes as the Linux console,
+ say Y here. If your EFI ConOut path contains only a UART
+ device, it will become the console automatically. Otherwise,
+ you must specify the "console=hcdp" kernel boot argument.
+
+ Neither the PCDP nor the HCDP affects naming of serial devices,
+ so a serial console may be /dev/ttyS0, /dev/ttyS1, etc, depending
+ on how the driver discovers devices.
+
+ You must also enable the appropriate drivers (serial, VGA, etc.)
+
+ See DIG64_HCDPv20_042804.pdf available from
+ <http://www.dig64.org/specifications/>
+
+config DMIID
+ bool "Export DMI identification via sysfs to userspace"
+ depends on DMI
+ default y
+ help
+ Say Y here if you want to query SMBIOS/DMI system identification
+ information from userspace through /sys/class/dmi/id/ or if you want
+ DMI-based module auto-loading.
+
+config DMI_SYSFS
+ tristate "DMI table support in sysfs"
+ depends on SYSFS && DMI
+ default n
+ help
+ Say Y or M here to enable the exporting of the raw DMI table
+ data via sysfs. This is useful for consuming the data without
+ requiring any access to /dev/mem at all. Tables are found
+ under /sys/firmware/dmi when this option is enabled and
+ loaded.
+
+config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ bool
+
+config ISCSI_IBFT_FIND
+ bool "iSCSI Boot Firmware Table Attributes"
+ depends on X86 && ISCSI_IBFT
+ default n
+ help
+ This option enables the kernel to find the region of memory
+ in which the ISCSI Boot Firmware Table (iBFT) resides. This
+ is necessary for the iSCSI Boot Firmware Table Attributes module to work
+ properly.
+
+config ISCSI_IBFT
+ tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
+ default n
+ help
+ This option enables support for detection and exposing of iSCSI
+ Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
+ detect iSCSI boot parameters dynamically during system boot, say Y.
+ Otherwise, say N.
+
+config RASPBERRYPI_FIRMWARE
+ tristate "Raspberry Pi Firmware Driver"
+ depends on BCM2835_MBOX
+ help
+ This option enables support for communicating with the firmware on the
+ Raspberry Pi.
+
+config FW_CFG_SYSFS
+ tristate "QEMU fw_cfg device support in sysfs"
+ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
+ depends on HAS_IOPORT_MAP
+ default n
+ help
+ Say Y or M here to enable the exporting of the QEMU firmware
+ configuration (fw_cfg) file entries via sysfs. Entries are
+ found under /sys/firmware/fw_cfg when this option is enabled
+ and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+ bool "QEMU fw_cfg device parameter parsing"
+ depends on FW_CFG_SYSFS
+ help
+ Allow the qemu_fw_cfg device to be initialized via the kernel
+ command line or using a module parameter.
+ WARNING: Using incorrect parameters (base address in particular)
+ may crash your system.
+
+config INTEL_STRATIX10_SERVICE
+ tristate "Intel Stratix10 Service Layer"
+ depends on ARCH_INTEL_SOCFPGA && ARM64 && HAVE_ARM_SMCCC
+ default n
+ help
+ Intel Stratix10 service layer runs at privileged exception level,
+ interfaces with the service providers (FPGA manager is one of them)
+ and manages secure monitor call to communicate with secure monitor
+ software at secure monitor exception level.
+
+ Say Y here if you want Stratix10 service layer support.
+
+config INTEL_STRATIX10_RSU
+ tristate "Intel Stratix10 Remote System Update"
+ depends on INTEL_STRATIX10_SERVICE
+ help
+ The Intel Remote System Update (RSU) driver exposes interfaces,
+ accessed through the Intel Service Layer, to user space via sysfs
+ device attribute nodes. The RSU interfaces report/control some of
+ the optional RSU features of the Stratix 10 SoC FPGA.
+
+ The RSU provides a way for customers to update the boot
+ configuration of a Stratix 10 SoC device with significantly reduced
+ risk of corrupting the bitstream storage and bricking the system.
+
+ Enable RSU support if you are using an Intel SoC FPGA with the RSU
+ feature enabled and you want Linux user space control.
+
+ Say Y here if you want Intel RSU support.
+
+config MTK_ADSP_IPC
+ tristate "MTK ADSP IPC Protocol driver"
+ depends on MTK_ADSP_MBOX
+ help
+ Say yes here to add support for the MediaTek ADSP IPC
+ between the host AP (Linux) and the firmware running on the ADSP.
+ An ADSP exists on some MediaTek processors.
+ Clients may use shared memory to exchange information with the ADSP.
+
+config QCOM_SCM
+ tristate
+
+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+ bool "Qualcomm download mode enabled by default"
+ depends on QCOM_SCM
+ help
+ A device with "download mode" enabled will upon an unexpected
+ warm-restart enter a special debug mode that allows the user to
+ "download" memory content over USB for offline postmortem analysis.
+ The feature can be enabled/disabled on the kernel command line.
+
+ Say Y here to enable "download mode" by default.
+
+config SYSFB
+ bool
+ select BOOT_VESA_SUPPORT
+
+config SYSFB_SIMPLEFB
+ bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
+ depends on X86 || EFI
+ select SYSFB
+ help
+ Firmwares often provide initial graphics framebuffers so the BIOS,
+ bootloader or kernel can show basic video-output during boot for
+ user-guidance and debugging. Historically, x86 used the VESA BIOS
+ Extensions and EFI-framebuffers for this, which are mostly limited
+ to x86 BIOS or EFI systems.
+ This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
+ framebuffers so the new generic system-framebuffer drivers can be
+ used instead. If the framebuffer is not compatible with the generic
+ modes, it is advertised as fallback platform framebuffer so legacy
+ drivers like efifb, vesafb and uvesafb can pick it up.
+ If this option is not selected, all system framebuffers are always
+ marked as fallback platform framebuffers as usual.
+
+ Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
+ not be able to pick up generic system framebuffers if this option
+ is selected. You are highly encouraged to enable simplefb as
+ replacement if you select this option. simplefb can correctly deal
+ with generic system framebuffers. But you should still keep vesafb
+ and others enabled as fallback if a system framebuffer is
+ incompatible with simplefb.
+
+ If unsure, say Y.
+
+config TI_SCI_PROTOCOL
+ tristate "TI System Control Interface (TISCI) Message Protocol"
+ depends on TI_MESSAGE_MANAGER
+ help
+ TI System Control Interface (TISCI) Message Protocol is used to manage
+ compute systems such as ARM, DSP, etc. with the system controller in
+ complex System on Chip (SoC) designs such as those found on certain
+ Keystone-generation SoCs from TI.
+
+ The system controller provides various facilities, including power
+ management support.
+
+ This protocol library is used by client drivers to use the features
+ provided by the system controller.
+
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM && CPU_V7
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
+config TURRIS_MOX_RWTM
+ tristate "Turris Mox rWTM secure firmware driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_DMA && OF
+ depends on MAILBOX
+ select HW_RANDOM
+ select ARMADA_37XX_RWTM_MBOX
+ help
+ This driver communicates with the firmware on the Cortex-M3 secure
+ processor of the Turris Mox router. Enable if you are building for
+ Turris Mox, and you will be able to read the device serial number and
+ other manufacturing data and also utilize the Entropy Bit Generator
+ for hardware random number generation.
+
+source "drivers/firmware/arm_ffa/Kconfig"
+source "drivers/firmware/broadcom/Kconfig"
+source "drivers/firmware/cirrus/Kconfig"
+source "drivers/firmware/google/Kconfig"
+source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/imx/Kconfig"
+source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/psci/Kconfig"
+source "drivers/firmware/smccc/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
+source "drivers/firmware/xilinx/Kconfig"
+
+endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
new file mode 100644
index 0000000000..28fcddcd68
--- /dev/null
+++ b/drivers/firmware/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
+obj-$(CONFIG_DMI) += dmi_scan.o
+obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
+obj-$(CONFIG_EDD) += edd.o
+obj-$(CONFIG_EFI_PCDP) += pcdp.o
+obj-$(CONFIG_DMIID) += dmi-id.o
+obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
+obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o
+obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
+obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
+obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
+obj-$(CONFIG_MTK_ADSP_IPC) += mtk-adsp-ipc.o
+obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
+obj-$(CONFIG_QCOM_SCM) += qcom-scm.o
+qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_SYSFB) += sysfb.o
+obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
+obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
+
+obj-y += arm_ffa/
+obj-y += arm_scmi/
+obj-y += broadcom/
+obj-y += cirrus/
+obj-y += meson/
+obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
+obj-y += efi/
+obj-y += imx/
+obj-y += psci/
+obj-y += smccc/
+obj-y += tegra/
+obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/Kconfig b/drivers/firmware/arm_ffa/Kconfig
new file mode 100644
index 0000000000..5e3ae5cf82
--- /dev/null
+++ b/drivers/firmware/arm_ffa/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_FFA_TRANSPORT
+ tristate "Arm Firmware Framework for Armv8-A"
+ depends on OF
+ depends on ARM64
+ default n
+ help
+ This Firmware Framework (FF) for Arm A-profile processors describes
+ interfaces that standardize communication between the various
+ software images which includes communication between images in
+ the Secure world and Normal world. It also leverages the
+ virtualization extension to isolate software images provided
+ by an ecosystem of vendors from each other.
+
+ This driver provides an interface for all the client drivers making
+ use of the features offered by ARM FF-A.
+
+config ARM_FFA_SMCCC
+ bool
+ default ARM_FFA_TRANSPORT
+ depends on ARM64 && HAVE_ARM_SMCCC_DISCOVERY
diff --git a/drivers/firmware/arm_ffa/Makefile b/drivers/firmware/arm_ffa/Makefile
new file mode 100644
index 0000000000..9d9f375232
--- /dev/null
+++ b/drivers/firmware/arm_ffa/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ffa-bus-y = bus.o
+ffa-driver-y = driver.o
+ffa-transport-$(CONFIG_ARM_FFA_SMCCC) += smccc.o
+ffa-module-objs := $(ffa-bus-y) $(ffa-driver-y) $(ffa-transport-y)
+obj-$(CONFIG_ARM_FFA_TRANSPORT) = ffa-module.o
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
new file mode 100644
index 0000000000..7865438b36
--- /dev/null
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+static DEFINE_IDA(ffa_bus_id);
+
+static int ffa_device_match(struct device *dev, struct device_driver *drv)
+{
+ const struct ffa_device_id *id_table;
+ struct ffa_device *ffa_dev;
+
+ id_table = to_ffa_driver(drv)->id_table;
+ ffa_dev = to_ffa_dev(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ /*
+ * FF-A v1.0 doesn't provide discovery of UUIDs, just the
+ * partition IDs, so fetch the partition IDs for this
+ * id_table UUID and assign the UUID to the device if the
+ * partition ID matches.
+ */
+ if (uuid_is_null(&ffa_dev->uuid))
+ ffa_device_match_uuid(ffa_dev, &id_table->uuid);
+
+ if (uuid_equal(&ffa_dev->uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
+
+static int ffa_device_probe(struct device *dev)
+{
+ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return ffa_drv->probe(ffa_dev);
+}
+
+static void ffa_device_remove(struct device *dev)
+{
+ struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
+
+ if (ffa_drv->remove)
+ ffa_drv->remove(to_ffa_dev(dev));
+}
+
+static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
+ ffa_dev->vm_id, &ffa_dev->uuid);
+}
+
+static ssize_t partition_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return sprintf(buf, "0x%04x\n", ffa_dev->vm_id);
+}
+static DEVICE_ATTR_RO(partition_id);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ return sprintf(buf, "%pUb\n", &ffa_dev->uuid);
+}
+static DEVICE_ATTR_RO(uuid);
+
+static struct attribute *ffa_device_attributes_attrs[] = {
+ &dev_attr_partition_id.attr,
+ &dev_attr_uuid.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ffa_device_attributes);
+
+struct bus_type ffa_bus_type = {
+ .name = "arm_ffa",
+ .match = ffa_device_match,
+ .probe = ffa_device_probe,
+ .remove = ffa_device_remove,
+ .uevent = ffa_device_uevent,
+ .dev_groups = ffa_device_attributes_groups,
+};
+EXPORT_SYMBOL_GPL(ffa_bus_type);
+
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ int ret;
+
+ if (!driver->probe)
+ return -EINVAL;
+
+ driver->driver.bus = &ffa_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ ret = driver_register(&driver->driver);
+ if (!ret)
+ pr_debug("registered new ffa driver %s\n", driver->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ffa_driver_register);
+
+void ffa_driver_unregister(struct ffa_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(ffa_driver_unregister);
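
/*
 * Illustration only (not part of this patch): a minimal FF-A client
 * driver as it would sit on top of this bus. The UUID below is a
 * placeholder and the module_ffa_driver() helper is assumed to come
 * from <linux/arm_ffa.h>.
 */
#include <linux/arm_ffa.h>
#include <linux/module.h>

static const struct ffa_device_id example_ffa_id_table[] = {
	{ UUID_INIT(0x12345678, 0x1234, 0x1234,
		    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77) },
	{}
};

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	dev_info(&ffa_dev->dev, "bound to partition 0x%04x\n", ffa_dev->vm_id);
	return 0;
}

static void example_ffa_remove(struct ffa_device *ffa_dev)
{
	/* nothing to tear down in this sketch */
}

static struct ffa_driver example_ffa_driver = {
	.name		= "example_ffa",
	.probe		= example_ffa_probe,
	.remove		= example_ffa_remove,
	.id_table	= example_ffa_id_table,
};
module_ffa_driver(example_ffa_driver);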
+
+static void ffa_release_device(struct device *dev)
+{
+ struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+ ida_free(&ffa_bus_id, ffa_dev->id);
+ kfree(ffa_dev);
+}
+
+static int __ffa_devices_unregister(struct device *dev, void *data)
+{
+ device_unregister(dev);
+
+ return 0;
+}
+
+static void ffa_devices_unregister(void)
+{
+ bus_for_each_dev(&ffa_bus_type, NULL, NULL,
+ __ffa_devices_unregister);
+}
+
+bool ffa_device_is_valid(struct ffa_device *ffa_dev)
+{
+ bool valid = false;
+ struct device *dev = NULL;
+ struct ffa_device *tmp_dev;
+
+ do {
+ dev = bus_find_next_device(&ffa_bus_type, dev);
+ tmp_dev = to_ffa_dev(dev);
+ if (tmp_dev == ffa_dev) {
+ valid = true;
+ break;
+ }
+ put_device(dev);
+ } while (dev);
+
+ put_device(dev);
+
+ return valid;
+}
+
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
+{
+ int id, ret;
+ struct device *dev;
+ struct ffa_device *ffa_dev;
+
+ id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
+ if (!ffa_dev) {
+ ida_free(&ffa_bus_id, id);
+ return NULL;
+ }
+
+ dev = &ffa_dev->dev;
+ dev->bus = &ffa_bus_type;
+ dev->release = ffa_release_device;
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
+
+ ffa_dev->id = id;
+ ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
+ uuid_copy(&ffa_dev->uuid, uuid);
+
+ ret = device_register(&ffa_dev->dev);
+ if (ret) {
+ dev_err(dev, "unable to register device %s err=%d\n",
+ dev_name(dev), ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return ffa_dev;
+}
+EXPORT_SYMBOL_GPL(ffa_device_register);
+
+void ffa_device_unregister(struct ffa_device *ffa_dev)
+{
+ if (!ffa_dev)
+ return;
+
+ device_unregister(&ffa_dev->dev);
+}
+EXPORT_SYMBOL_GPL(ffa_device_unregister);
+
+int arm_ffa_bus_init(void)
+{
+ return bus_register(&ffa_bus_type);
+}
+
+void arm_ffa_bus_exit(void)
+{
+ ffa_devices_unregister();
+ bus_unregister(&ffa_bus_type);
+ ida_destroy(&ffa_bus_id);
+}
diff --git a/drivers/firmware/arm_ffa/common.h b/drivers/firmware/arm_ffa/common.h
new file mode 100644
index 0000000000..d6eccf1fd3
--- /dev/null
+++ b/drivers/firmware/arm_ffa/common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _FFA_COMMON_H
+#define _FFA_COMMON_H
+
+#include <linux/arm_ffa.h>
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+
+typedef struct arm_smccc_1_2_regs ffa_value_t;
+
+typedef void (ffa_fn)(ffa_value_t, ffa_value_t *);
+
+int arm_ffa_bus_init(void);
+void arm_ffa_bus_exit(void);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid);
+
+#ifdef CONFIG_ARM_FFA_SMCCC
+int __init ffa_transport_init(ffa_fn **invoke_ffa_fn);
+#else
+static inline int __init ffa_transport_init(ffa_fn **invoke_ffa_fn)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* _FFA_COMMON_H */
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
new file mode 100644
index 0000000000..7cd6b1564e
--- /dev/null
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Arm Firmware Framework for ARMv8-A(FFA) interface driver
+ *
+ * The Arm FFA specification[1] describes a software architecture to
+ * leverage the virtualization extension to isolate software images
+ * provided by an ecosystem of vendors from each other and describes
+ * interfaces that standardize communication between the various software
+ * images including communication between images in the Secure world and
+ * Normal world. Any Hypervisor could use the FFA interfaces to enable
+ * communication between VMs it manages.
+ *
+ * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can assign
+ * system resources (memory regions, devices, CPU cycles) to the partitions
+ * and manage isolation amongst them.
+ *
+ * [1] https://developer.arm.com/docs/den0077/latest
+ *
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#define DRIVER_NAME "ARM FF-A"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include "common.h"
+
+#define FFA_DRIVER_VERSION FFA_VERSION_1_0
+#define FFA_MIN_VERSION FFA_VERSION_1_0
+
+#define SENDER_ID_MASK GENMASK(31, 16)
+#define RECEIVER_ID_MASK GENMASK(15, 0)
+#define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
+#define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
+#define PACK_TARGET_INFO(s, r) \
+ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
+
+/*
+ * Keep the RX/TX buffer size at 4K for now;
+ * 64K may be preferred to keep it at least one page with a 64K PAGE_SIZE config.
+ */
+#define RXTX_BUFFER_SIZE SZ_4K
+
+static ffa_fn *invoke_ffa_fn;
+
+static const int ffa_linux_errmap[] = {
+ /* better than a switch-case as long as the return values are contiguous */
+ 0, /* FFA_RET_SUCCESS */
+ -EOPNOTSUPP, /* FFA_RET_NOT_SUPPORTED */
+ -EINVAL, /* FFA_RET_INVALID_PARAMETERS */
+ -ENOMEM, /* FFA_RET_NO_MEMORY */
+ -EBUSY, /* FFA_RET_BUSY */
+ -EINTR, /* FFA_RET_INTERRUPTED */
+ -EACCES, /* FFA_RET_DENIED */
+ -EAGAIN, /* FFA_RET_RETRY */
+ -ECANCELED, /* FFA_RET_ABORTED */
+};
+
+static inline int ffa_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
+ return ffa_linux_errmap[err_idx];
+ return -EINVAL;
+}
+
+struct ffa_drv_info {
+ u32 version;
+ u16 vm_id;
+ struct mutex rx_lock; /* lock to protect Rx buffer */
+ struct mutex tx_lock; /* lock to protect Tx buffer */
+ void *rx_buffer;
+ void *tx_buffer;
+ bool mem_ops_native;
+};
+
+static struct ffa_drv_info *drv_info;
+
+/*
+ * The driver must be able to support all the versions from the earliest
+ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
+ * The specification states that if the firmware supports an FFA implementation
+ * that is incompatible with and at a greater version number than specified
+ * by the caller (FFA_DRIVER_VERSION passed as a parameter to FFA_VERSION),
+ * it must return the NOT_SUPPORTED error code.
+ */
+static u32 ffa_compatible_version_find(u32 version)
+{
+ u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
+ u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
+ u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
+
+ if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
+ return version;
+
+ pr_info("Firmware version higher than driver version, downgrading\n");
+ return FFA_DRIVER_VERSION;
+}
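
/*
 * Illustration only (not part of this patch): per the FF-A specification
 * the 32-bit FFA_VERSION word packs the major version in bits [30:16]
 * and the minor version in bits [15:0], which is what the
 * FFA_MAJOR_VERSION()/FFA_MINOR_VERSION() helpers extract above. A
 * hypothetical packing helper and two concrete cases of the rule in
 * ffa_compatible_version_find():
 */
static inline u32 example_ffa_pack_version(u16 major, u16 minor)
{
	return ((u32)major << 16) | minor;	/* bit 31 must be zero */
}

/*
 * With FFA_DRIVER_VERSION at v1.0:
 *  - firmware reports v1.0 -> ffa_compatible_version_find() returns v1.0;
 *  - firmware reports v1.1 -> the firmware minor is newer, so the driver
 *    downgrades and returns FFA_DRIVER_VERSION (v1.0).
 */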
+
+static int ffa_version_check(u32 *version)
+{
+ ffa_value_t ver;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
+ }, &ver);
+
+ if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ pr_info("FFA_VERSION returned not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ver.a0 < FFA_MIN_VERSION) {
+ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_MIN_VERSION),
+ FFA_MINOR_VERSION(FFA_MIN_VERSION));
+ return -EINVAL;
+ }
+
+ pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
+ FFA_MINOR_VERSION(ver.a0));
+ *version = ffa_compatible_version_find(ver.a0);
+
+ return 0;
+}
+
+static int ffa_rx_release(void)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RX_RELEASE,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ /* check for ret.a0 == FFA_RX_RELEASE ? */
+
+ return 0;
+}
+
+static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FN_NATIVE(RXTX_MAP),
+ .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+static int ffa_rxtx_unmap(u16 vm_id)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
+
+/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
+static int
+__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
+ struct ffa_partition_info *buffer, int num_partitions)
+{
+ int idx, count, flags = 0, sz, buf_sz;
+ ffa_value_t partition_info;
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ (!buffer || !num_partitions)) /* Just get the count for now */
+ flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
+
+ mutex_lock(&drv_info->rx_lock);
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_PARTITION_INFO_GET,
+ .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
+ .a5 = flags,
+ }, &partition_info);
+
+ if (partition_info.a0 == FFA_ERROR) {
+ mutex_unlock(&drv_info->rx_lock);
+ return ffa_to_linux_errno((int)partition_info.a2);
+ }
+
+ count = partition_info.a2;
+
+ if (drv_info->version > FFA_VERSION_1_0) {
+ buf_sz = sz = partition_info.a3;
+ if (sz > sizeof(*buffer))
+ buf_sz = sizeof(*buffer);
+ } else {
+ /* FFA_VERSION_1_0 lacks size in the response */
+ buf_sz = sz = 8;
+ }
+
+ if (buffer && count <= num_partitions)
+ for (idx = 0; idx < count; idx++)
+ memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
+ buf_sz);
+
+ ffa_rx_release();
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ return count;
+}
+
+/* buffer is allocated here and the caller must free it if the returned count > 0 */
+static int
+ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
+{
+ int count;
+ u32 uuid0_4[4];
+ struct ffa_partition_info *pbuf;
+
+ export_uuid((u8 *)uuid0_4, uuid);
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
+ uuid0_4[3], NULL, 0);
+ if (count <= 0)
+ return count;
+
+ pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+
+ count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
+ uuid0_4[3], pbuf, count);
+ if (count <= 0)
+ kfree(pbuf);
+ else
+ *buffer = pbuf;
+
+ return count;
+}
+
+#define VM_ID_MASK GENMASK(15, 0)
+static int ffa_id_get(u16 *vm_id)
+{
+ ffa_value_t id;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_ID_GET,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
+
+ return 0;
+}
+
+static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
+ struct ffa_send_direct_data *data)
+{
+ u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ ffa_value_t ret;
+
+ if (mode_32bit) {
+ req_id = FFA_MSG_SEND_DIRECT_REQ;
+ resp_id = FFA_MSG_SEND_DIRECT_RESP;
+ } else {
+ req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
+ resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
+ .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
+ .a6 = data->data3, .a7 = data->data4,
+ }, &ret);
+
+ while (ret.a0 == FFA_INTERRUPT)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_RUN, .a1 = ret.a1,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 == resp_id) {
+ data->data0 = ret.a3;
+ data->data1 = ret.a4;
+ data->data2 = ret.a5;
+ data->data3 = ret.a6;
+ data->data4 = ret.a7;
+ return 0;
+ }
+
+ return -EINVAL;
+}
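
/*
 * Illustration only (not part of this patch): how a client driver
 * reaches this request/response pair through the msg_ops exported at
 * the end of this file. The payload values are arbitrary and error
 * handling is kept to the minimum.
 */
static int example_ping_partition(struct ffa_device *ffa_dev)
{
	struct ffa_send_direct_data data = {
		.data0 = 0x1,	/* protocol-specific payload */
		.data1 = 0x2,
	};
	int ret;

	/* Blocks until the partition answers with a DIRECT_RESP. */
	ret = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
	if (ret)
		return ret;

	/* data.data0..data4 now hold the response payload. */
	return 0;
}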
+
+static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
+ u32 frag_len, u32 len, u64 *handle)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = func_id, .a1 = len, .a2 = frag_len,
+ .a3 = buf, .a4 = buf_sz,
+ }, &ret);
+
+ while (ret.a0 == FFA_MEM_OP_PAUSE)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_OP_RESUME,
+ .a1 = ret.a1, .a2 = ret.a2,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 == FFA_SUCCESS) {
+ if (handle)
+ *handle = PACK_HANDLE(ret.a2, ret.a3);
+ } else if (ret.a0 == FFA_MEM_FRAG_RX) {
+ if (handle)
+ *handle = PACK_HANDLE(ret.a1, ret.a2);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return frag_len;
+}
+
+static int ffa_mem_next_frag(u64 handle, u32 frag_len)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_FRAG_TX,
+ .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
+ .a3 = frag_len,
+ }, &ret);
+
+ while (ret.a0 == FFA_MEM_OP_PAUSE)
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_OP_RESUME,
+ .a1 = ret.a1, .a2 = ret.a2,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ if (ret.a0 == FFA_MEM_FRAG_RX)
+ return ret.a3;
+ else if (ret.a0 == FFA_SUCCESS)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int
+ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
+ u32 len, u64 *handle, bool first)
+{
+ if (!first)
+ return ffa_mem_next_frag(*handle, frag_len);
+
+ return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
+}
+
+static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
+{
+ u32 num_pages = 0;
+
+ do {
+ num_pages += sg->length / FFA_PAGE_SIZE;
+ } while ((sg = sg_next(sg)));
+
+ return num_pages;
+}
+
+static u8 ffa_memory_attributes_get(u32 func_id)
+{
+ /*
+ * For the memory lend or donate operation, if the receiver is a PE or
+ * a proxy endpoint, the owner/sender must not specify the attributes
+ */
+ if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
+ func_id == FFA_MEM_LEND)
+ return 0;
+
+ return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
+}
+
+static int
+ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
+ struct ffa_mem_ops_args *args)
+{
+ int rc = 0;
+ bool first = true;
+ phys_addr_t addr = 0;
+ struct ffa_composite_mem_region *composite;
+ struct ffa_mem_region_addr_range *constituents;
+ struct ffa_mem_region_attributes *ep_mem_access;
+ struct ffa_mem_region *mem_region = buffer;
+ u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
+
+ mem_region->tag = args->tag;
+ mem_region->flags = args->flags;
+ mem_region->sender_id = drv_info->vm_id;
+ mem_region->attributes = ffa_memory_attributes_get(func_id);
+ ep_mem_access = &mem_region->ep_mem_access[0];
+
+ for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ ep_mem_access->receiver = args->attrs[idx].receiver;
+ ep_mem_access->attrs = args->attrs[idx].attrs;
+ ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+ ep_mem_access->flag = 0;
+ ep_mem_access->reserved = 0;
+ }
+ mem_region->handle = 0;
+ mem_region->reserved_0 = 0;
+ mem_region->reserved_1 = 0;
+ mem_region->ep_count = args->nattrs;
+
+ composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+ composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
+ composite->addr_range_cnt = num_entries;
+ composite->reserved = 0;
+
+ length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
+ frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+ if (frag_len > max_fragsize)
+ return -ENXIO;
+
+ if (!args->use_txbuf) {
+ addr = virt_to_phys(buffer);
+ buf_sz = max_fragsize / FFA_PAGE_SIZE;
+ }
+
+ constituents = buffer + frag_len;
+ idx = 0;
+ do {
+ if (frag_len == max_fragsize) {
+ rc = ffa_transmit_fragment(func_id, addr, buf_sz,
+ frag_len, length,
+ &args->g_handle, first);
+ if (rc < 0)
+ return -ENXIO;
+
+ first = false;
+ idx = 0;
+ frag_len = 0;
+ constituents = buffer;
+ }
+
+ if ((void *)constituents - buffer > max_fragsize) {
+ pr_err("Memory Region Fragment > Tx Buffer size\n");
+ return -EFAULT;
+ }
+
+ constituents->address = sg_phys(args->sg);
+ constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+ constituents->reserved = 0;
+ constituents++;
+ frag_len += sizeof(struct ffa_mem_region_addr_range);
+ } while ((args->sg = sg_next(args->sg)));
+
+ return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
+ length, &args->g_handle, first);
+}
+
+static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
+{
+ int ret;
+ void *buffer;
+
+ if (!args->use_txbuf) {
+ buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ } else {
+ buffer = drv_info->tx_buffer;
+ mutex_lock(&drv_info->tx_lock);
+ }
+
+ ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
+
+ if (args->use_txbuf)
+ mutex_unlock(&drv_info->tx_lock);
+ else
+ free_pages_exact(buffer, RXTX_BUFFER_SIZE);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int ffa_memory_reclaim(u64 g_handle, u32 flags)
+{
+ ffa_value_t ret;
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_MEM_RECLAIM,
+ .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
+ .a3 = flags,
+ }, &ret);
+
+ if (ret.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)ret.a2);
+
+ return 0;
+}
+
+static int ffa_features(u32 func_feat_id, u32 input_props,
+ u32 *if_props_1, u32 *if_props_2)
+{
+ ffa_value_t id;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
+ pr_err("%s: Invalid Parameters: %x, %x", __func__,
+ func_feat_id, input_props);
+ return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ if (if_props_1)
+ *if_props_1 = id.a2;
+ if (if_props_2)
+ *if_props_2 = id.a3;
+
+ return 0;
+}
+
+static void ffa_set_up_mem_ops_native_flag(void)
+{
+ if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
+ !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
+ drv_info->mem_ops_native = true;
+}
+
+static u32 ffa_api_version_get(void)
+{
+ return drv_info->version;
+}
+
+static int ffa_partition_info_get(const char *uuid_str,
+ struct ffa_partition_info *buffer)
+{
+ int count;
+ uuid_t uuid;
+ struct ffa_partition_info *pbuf;
+
+ if (uuid_parse(uuid_str, &uuid)) {
+ pr_err("invalid uuid (%s)\n", uuid_str);
+ return -ENODEV;
+ }
+
+ count = ffa_partition_probe(&uuid, &pbuf);
+ if (count <= 0)
+ return -ENOENT;
+
+ memcpy(buffer, pbuf, sizeof(*pbuf) * count);
+ kfree(pbuf);
+ return 0;
+}
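
/*
 * Illustration only (not part of this patch): looking up a partition
 * from a client driver via info_ops. The UUID string is a placeholder;
 * note that the implementation above copies one entry per matching
 * partition, so the buffer must be large enough for all of them (a
 * single match is assumed here).
 */
static int example_lookup_partition(struct ffa_device *ffa_dev)
{
	static const char uuid_str[] = "12345678-1234-1234-0011-223344556677";
	struct ffa_partition_info info = {};
	int ret;

	ret = ffa_dev->ops->info_ops->partition_info_get(uuid_str, &info);
	if (ret)
		return ret;

	pr_info("partition 0x%x found, properties 0x%x\n",
		info.id, info.properties);
	return 0;
}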
+
+static void ffa_mode_32bit_set(struct ffa_device *dev)
+{
+ dev->mode_32bit = true;
+}
+
+static int ffa_sync_send_receive(struct ffa_device *dev,
+ struct ffa_send_direct_data *data)
+{
+ return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
+ dev->mode_32bit, data);
+}
+
+static int ffa_memory_share(struct ffa_mem_ops_args *args)
+{
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+
+ return ffa_memory_ops(FFA_MEM_SHARE, args);
+}
+
+static int ffa_memory_lend(struct ffa_mem_ops_args *args)
+{
+ /* Note that upon a successful MEM_LEND request the caller
+ * must ensure that the memory region specified is not accessed
+ * until a successful MEM_RECLAIM call has been made.
+ * On systems with a hypervisor present this will be enforced;
+ * however, on systems without a hypervisor the responsibility
+ * falls to the calling kernel driver to prevent access.
+ */
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+
+ return ffa_memory_ops(FFA_MEM_LEND, args);
+}
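
/*
 * Illustration only (not part of this patch): sharing a single page
 * with the partition a client is bound to, using the mem_ops exported
 * below. Field names follow struct ffa_mem_ops_args; the access
 * permission bits in .attrs are left at 0 here, real users would set
 * the appropriate FFA_MEM_* constants from <linux/arm_ffa.h>.
 */
static int example_share_page(struct ffa_device *ffa_dev, void *page_addr)
{
	struct scatterlist sg;
	struct ffa_mem_region_attributes mem_attr = {
		.receiver = ffa_dev->vm_id,
		.attrs = 0,		/* receiver access permissions */
	};
	struct ffa_mem_ops_args args = {
		.use_txbuf = true,	/* use the pre-mapped Tx buffer */
		.attrs = &mem_attr,
		.nattrs = 1,
		.sg = &sg,
	};
	int ret;

	sg_init_one(&sg, page_addr, PAGE_SIZE);

	ret = ffa_dev->ops->mem_ops->memory_share(&args);
	if (ret)
		return ret;

	/* args.g_handle identifies the shared region until it is reclaimed. */
	return ffa_dev->ops->mem_ops->memory_reclaim(args.g_handle, 0);
}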
+
+static const struct ffa_info_ops ffa_drv_info_ops = {
+ .api_version_get = ffa_api_version_get,
+ .partition_info_get = ffa_partition_info_get,
+};
+
+static const struct ffa_msg_ops ffa_drv_msg_ops = {
+ .mode_32bit_set = ffa_mode_32bit_set,
+ .sync_send_receive = ffa_sync_send_receive,
+};
+
+static const struct ffa_mem_ops ffa_drv_mem_ops = {
+ .memory_reclaim = ffa_memory_reclaim,
+ .memory_share = ffa_memory_share,
+ .memory_lend = ffa_memory_lend,
+};
+
+static const struct ffa_ops ffa_drv_ops = {
+ .info_ops = &ffa_drv_info_ops,
+ .msg_ops = &ffa_drv_msg_ops,
+ .mem_ops = &ffa_drv_mem_ops,
+};
+
+void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
+{
+ int count, idx;
+ struct ffa_partition_info *pbuf, *tpbuf;
+
+ /*
+ * FF-A v1.1 provides the UUID for each partition as part of the discovery
+ * API; the discovered UUID is then populated in the device's UUID and
+ * there is no need to copy it from the driver's id_table.
+ */
+ if (drv_info->version > FFA_VERSION_1_0)
+ return;
+
+ count = ffa_partition_probe(uuid, &pbuf);
+ if (count <= 0)
+ return;
+
+ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
+ if (tpbuf->id == ffa_dev->vm_id)
+ uuid_copy(&ffa_dev->uuid, uuid);
+ kfree(pbuf);
+}
+
+static void ffa_setup_partitions(void)
+{
+ int count, idx;
+ uuid_t uuid;
+ struct ffa_device *ffa_dev;
+ struct ffa_partition_info *pbuf, *tpbuf;
+
+ count = ffa_partition_probe(&uuid_null, &pbuf);
+ if (count <= 0) {
+ pr_info("%s: No partitions found, error %d\n", __func__, count);
+ return;
+ }
+
+ for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
+ import_uuid(&uuid, (u8 *)tpbuf->uuid);
+
+ /* Note that if the UUID is uuid_null, ffa_device_match() will
+ * need to find the UUID of this partition ID with the help of
+ * ffa_device_match_uuid(). FF-A v1.1 and above provides the UUID
+ * here for each partition as part of the discovery API and it is
+ * passed on as-is.
+ */
+ ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register partition ID 0x%x\n",
+ __func__, tpbuf->id);
+ continue;
+ }
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+ ffa_mode_32bit_set(ffa_dev);
+ }
+ kfree(pbuf);
+}
+
+static int __init ffa_init(void)
+{
+ int ret;
+
+ ret = ffa_transport_init(&invoke_ffa_fn);
+ if (ret)
+ return ret;
+
+ ret = arm_ffa_bus_init();
+ if (ret)
+ return ret;
+
+ drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
+ if (!drv_info) {
+ ret = -ENOMEM;
+ goto ffa_bus_exit;
+ }
+
+ ret = ffa_version_check(&drv_info->version);
+ if (ret)
+ goto free_drv_info;
+
+ if (ffa_id_get(&drv_info->vm_id)) {
+ pr_err("failed to obtain VM id for self\n");
+ ret = -ENODEV;
+ goto free_drv_info;
+ }
+
+ drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!drv_info->rx_buffer) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
+ if (!drv_info->tx_buffer) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
+ virt_to_phys(drv_info->rx_buffer),
+ RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
+ if (ret) {
+ pr_err("failed to register FFA RxTx buffers\n");
+ goto free_pages;
+ }
+
+ mutex_init(&drv_info->rx_lock);
+ mutex_init(&drv_info->tx_lock);
+
+ ffa_setup_partitions();
+
+ ffa_set_up_mem_ops_native_flag();
+
+ return 0;
+free_pages:
+ if (drv_info->tx_buffer)
+ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+free_drv_info:
+ kfree(drv_info);
+ffa_bus_exit:
+ arm_ffa_bus_exit();
+ return ret;
+}
+subsys_initcall(ffa_init);
+
+static void __exit ffa_exit(void)
+{
+ ffa_rxtx_unmap(drv_info->vm_id);
+ free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
+ free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+ kfree(drv_info);
+ arm_ffa_bus_exit();
+}
+module_exit(ffa_exit);
+
+MODULE_ALIAS("arm-ffa");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Arm FF-A interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_ffa/smccc.c b/drivers/firmware/arm_ffa/smccc.c
new file mode 100644
index 0000000000..4d85bfff0a
--- /dev/null
+++ b/drivers/firmware/arm_ffa/smccc.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#include <linux/printk.h>
+
+#include "common.h"
+
+static void __arm_ffa_fn_smc(ffa_value_t args, ffa_value_t *res)
+{
+ arm_smccc_1_2_smc(&args, res);
+}
+
+static void __arm_ffa_fn_hvc(ffa_value_t args, ffa_value_t *res)
+{
+ arm_smccc_1_2_hvc(&args, res);
+}
+
+int __init ffa_transport_init(ffa_fn **invoke_ffa_fn)
+{
+ enum arm_smccc_conduit conduit;
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return -EOPNOTSUPP;
+
+ conduit = arm_smccc_1_1_get_conduit();
+ if (conduit == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (conduit == SMCCC_CONDUIT_SMC)
+ *invoke_ffa_fn = __arm_ffa_fn_smc;
+ else
+ *invoke_ffa_fn = __arm_ffa_fn_hvc;
+
+ return 0;
+}
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
new file mode 100644
index 0000000000..ea0f5083ac
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -0,0 +1,196 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM System Control and Management Interface Protocol"
+
+config ARM_SCMI_PROTOCOL
+ tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+ depends on ARM || ARM64 || COMPILE_TEST
+ help
+ ARM System Control and Management Interface (SCMI) protocol is a
+ set of operating system-independent software interfaces that are
+ used in system management. SCMI is extensible and currently provides
+ interfaces for: Discovery and self-description of the interfaces
+ it supports, Power domain management which is the ability to place
+ a given device or domain into the various power-saving states that
+ it supports, Performance management which is the ability to control
+ the performance of a domain that is composed of compute engines
+ such as application processors and other accelerators, Clock
+ management which is the ability to set and inquire rates on platform
+ managed clocks and Sensor management which is the ability to read
+ sensor data, and be notified of sensor value changes.
+
+ This protocol library provides interface for all the client drivers
+ making use of the features offered by the SCMI.
+
+if ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_NEED_DEBUGFS
+ bool
+ help
+ This declares whether at least one SCMI facility is configured
+ which needs debugfs support. When selected, it causes the creation
+ of a common SCMI debugfs root directory.
+
+config ARM_SCMI_RAW_MODE_SUPPORT
+ bool "Enable support for SCMI Raw transmission mode"
+ depends on DEBUG_FS
+ select ARM_SCMI_NEED_DEBUGFS
+ help
+ Enable support for SCMI Raw transmission mode.
+
+ If enabled, it allows the direct injection and snooping of SCMI bare
+ messages through a dedicated debugfs interface.
+ It is meant to be used by SCMI compliance/testing suites.
+
+ When enabled, regular SCMI driver interactions are inhibited in
+ order to avoid unexpected interactions with the SCMI Raw message
+ flow. If unsure say N.
+
+config ARM_SCMI_RAW_MODE_SUPPORT_COEX
+ bool "Allow SCMI Raw mode coexistence with normal SCMI stack"
+ depends on ARM_SCMI_RAW_MODE_SUPPORT
+ help
+ Allow SCMI Raw transmission mode to coexist with normal SCMI stack.
+
+ This will allow regular SCMI drivers to register with the core and
+ operate normally, which could make an SCMI test suite using the
+ SCMI Raw mode support unreliable. If unsure, say N.
+
+config ARM_SCMI_HAVE_TRANSPORT
+ bool
+ help
+ This declares whether at least one SCMI transport has been configured.
+ Used to trigger a build bug when trying to build SCMI without any
+ configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+ bool
+ help
+ This declares whether a shared memory based transport for SCMI is
+ available.
+
+config ARM_SCMI_HAVE_MSG
+ bool
+ help
+ This declares whether a message passing based transport for SCMI is
+ available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+ bool "SCMI transport based on Mailbox"
+ depends on MAILBOX
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable mailbox based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on mailboxes, answer Y.
+
+config ARM_SCMI_TRANSPORT_OPTEE
+ bool "SCMI transport based on OP-TEE service"
+ depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ select ARM_SCMI_HAVE_MSG
+ default y
+ help
+ This enables the OP-TEE service based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on OP-TEE SCMI service, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC
+ bool "SCMI transport based on SMC"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_SHMEM
+ default y
+ help
+ Enable SMC based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on SMC, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
+ bool "Enable atomic mode support for SCMI SMC transport"
+ depends on ARM_SCMI_TRANSPORT_SMC
+ help
+ Enable support of atomic operation for SCMI SMC based transport.
+
+ If you want the SCMI SMC based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+ bool "SCMI transport based on VirtIO"
+ depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
+ select ARM_SCMI_HAVE_TRANSPORT
+ select ARM_SCMI_HAVE_MSG
+ help
+ This enables the virtio based transport for SCMI.
+
+ If you want the ARM SCMI PROTOCOL stack to include support for a
+ transport based on VirtIO, answer Y.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+ bool "SCMI VirtIO transport Version 1 compliance"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ default y
+ help
+ This enforces strict compliance with the VirtIO Version 1 specification.
+
+ If you want the ARM SCMI VirtIO transport layer to refuse to work
+ with Legacy VirtIO backends and instead support only VirtIO Version 1
+ devices (or above), answer Y.
+
+ If you want instead to support also old Legacy VirtIO backends (like
+ the ones implemented by kvmtool) and let the core Kernel VirtIO layer
+ take care of the needed conversions, say N.
+
+config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
+ bool "Enable atomic mode for SCMI VirtIO transport"
+ depends on ARM_SCMI_TRANSPORT_VIRTIO
+ help
+ Enable support for atomic operations on the SCMI VirtIO based transport.
+
+ If you want the SCMI VirtIO based transport to operate in atomic
+ mode, avoiding any kind of sleeping behaviour for selected
+ transactions on the TX path, answer Y.
+
+ Enabling atomic mode operations allows any SCMI driver using this
+ transport to optionally ask for atomic SCMI transactions and operate
+ in atomic context too, at the price of using a number of busy-waiting
+ primitives all over instead. If unsure, say N.
+
+endif #ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_POWER_DOMAIN
+ tristate "SCMI power domain driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCMI power domains, which can be
+ enabled or disabled via the SCP firmware.
+
+ This driver can also be built as a module. If so, the module
+ will be called scmi_pm_domain. Note this may be needed early in boot
+ before the rootfs is available.
+
+config ARM_SCMI_POWER_CONTROL
+ tristate "SCMI system power control driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ help
+ This enables System Power control logic which binds system shutdown or
+ reboot actions to SCMI System Power notifications generated by SCP
+ firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scmi_power_control. Note this may be needed early in boot to
+ catch early shutdown/reboot SCMI requests.
+
+endmenu
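Editor's note: the Raw mode and transport options above are plain build-time booleans. Code that must honour them at run time typically tests them with IS_ENABLED() rather than #ifdef, which is exactly what the bus layer later in this patch does when it rejects regular device requests while Raw mode owns the stack. A minimal sketch of that pattern (the helper name is illustrative, not part of the patch):

#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/printk.h>

/*
 * Hypothetical helper: refuse regular SCMI device requests when Raw mode
 * is built in without the coexistence option.
 */
static int example_check_raw_mode_policy(void)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
	    !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
		pr_warn("SCMI Raw mode active, regular drivers are inhibited\n");
		return -EINVAL;
	}

	return 0;
}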
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
new file mode 100644
index 0000000000..b31d78fa66
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+scmi-bus-y = bus.o
+scmi-core-objs := $(scmi-bus-y)
+
+scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o
+scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y)
+
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+
+obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
+
+ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
+# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
+# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
+# hooks are inserted via the -pg switch.
+CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
+endif
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
new file mode 100644
index 0000000000..a52f084a6a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/base.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Base Protocol
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_BASE_NUM_SOURCES 1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
+
+enum scmi_base_protocol_cmd {
+ BASE_DISCOVER_VENDOR = 0x3,
+ BASE_DISCOVER_SUB_VENDOR = 0x4,
+ BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
+ BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
+ BASE_DISCOVER_AGENT = 0x7,
+ BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+struct scmi_msg_resp_base_attributes {
+ u8 num_protocols;
+ u8 num_agents;
+ __le16 reserved;
+};
+
+struct scmi_msg_resp_base_discover_agent {
+ __le32 agent_id;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_base_error_notify {
+ __le32 event_control;
+#define BASE_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_base_error_notify_payld {
+ __le32 agent_id;
+ __le32 error_status;
+#define IS_FATAL_ERROR(x) ((x) & BIT(31))
+#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x))
+ __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
+};
+
+/**
+ * scmi_base_attributes_get() - gets the implementation details
+ * that are associated with the base protocol.
+ *
+ * @ph: SCMI protocol handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_base_attributes *attr_info;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr_info), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ attr_info = t->rx.buf;
+ rev->num_protocols = attr_info->num_protocols;
+ rev->num_agents = attr_info->num_agents;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
+ *
+ * @ph: SCMI protocol handle
+ * @sub_vendor: specify true if sub-vendor ID is needed
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
+{
+ u8 cmd;
+ int ret, size;
+ char *vendor_id;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ if (sub_vendor) {
+ cmd = BASE_DISCOVER_SUB_VENDOR;
+ vendor_id = rev->sub_vendor_id;
+ size = ARRAY_SIZE(rev->sub_vendor_id);
+ } else {
+ cmd = BASE_DISCOVER_VENDOR;
+ vendor_id = rev->vendor_id;
+ size = ARRAY_SIZE(rev->vendor_id);
+ }
+
+ ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strscpy(vendor_id, t->rx.buf, size);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_version_get() - gets a vendor-specific
+ * implementation 32-bit version. The format of the version number is
+ * vendor-specific.
+ *
+ * @ph: SCMI protocol handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ __le32 *impl_ver;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION,
+ 0, sizeof(*impl_ver), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ impl_ver = t->rx.buf;
+ rev->impl_ver = le32_to_cpu(*impl_ver);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_list_get() - gets the list of protocols the
+ * OSPM is allowed to access
+ *
+ * @ph: SCMI protocol handle
+ * @protocols_imp: pointer to hold the list of protocol identifiers
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
+ u8 *protocols_imp)
+{
+ u8 *list;
+ int ret, loop;
+ struct scmi_xfer *t;
+ __le32 *num_skip, *num_ret;
+ u32 tot_num_ret = 0, loop_num_ret;
+ struct device *dev = ph->dev;
+ struct scmi_revision_info *rev = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
+ sizeof(*num_skip), 0, &t);
+ if (ret)
+ return ret;
+
+ num_skip = t->tx.buf;
+ num_ret = t->rx.buf;
+ list = t->rx.buf + sizeof(*num_ret);
+
+ do {
+ size_t real_list_sz;
+ u32 calc_list_sz;
+
+ /* Set the number of protocols to be skipped/already read */
+ *num_skip = cpu_to_le32(tot_num_ret);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ break;
+
+ loop_num_ret = le32_to_cpu(*num_ret);
+ if (!loop_num_ret)
+ break;
+
+ if (loop_num_ret > rev->num_protocols - tot_num_ret) {
+ dev_err(dev,
+ "No. Returned protocols > Total protocols.\n");
+ break;
+ }
+
+ if (t->rx.len < (sizeof(u32) * 2)) {
+ dev_err(dev, "Truncated reply - rx.len:%zd\n",
+ t->rx.len);
+ ret = -EPROTO;
+ break;
+ }
+
+ real_list_sz = t->rx.len - sizeof(u32);
+ calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
+ sizeof(u32);
+ if (calc_list_sz != real_list_sz) {
+ dev_warn(dev,
+ "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n",
+ real_list_sz, calc_list_sz, loop_num_ret);
+ /*
+ * Bail out if the expected list size is bigger than the
+ * total payload size of the received reply.
+ */
+ if (calc_list_sz > real_list_sz) {
+ ret = -EPROTO;
+ break;
+ }
+ }
+
+ for (loop = 0; loop < loop_num_ret; loop++)
+ protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+ tot_num_ret += loop_num_ret;
+
+ ph->xops->reset_rx_to_maxsz(ph, t);
+ } while (tot_num_ret < rev->num_protocols);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_discover_agent_get() - discover the name of an agent
+ *
+ * @ph: SCMI protocol handle
+ * @id: Agent identifier
+ * @name: Agent identifier ASCII string
+ *
+ * An agent id of 0 is reserved to identify the platform itself.
+ * Generally the operating system is represented as "OSPM".
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
+ int id, char *name)
+{
+ int ret;
+ struct scmi_msg_resp_base_discover_agent *agent_info;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
+ sizeof(__le32), sizeof(*agent_info), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ agent_info = t->rx.buf;
+ strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_base_error_notify(const struct scmi_protocol_handle *ph,
+ bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_base_error_notify *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, BASE_NOTIFY_ERRORS,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_base_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_base_error_notify(ph, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_base_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ int i;
+ const struct scmi_base_error_notify_payld *p = payld;
+ struct scmi_base_error_report *r = report;
+
+ /*
+ * The BaseError notification payload is variable in size, up to a
+ * maximum length determined by the struct pointed to by p.
+ * payld_sz is the effective length of this notification payload, so
+ * it cannot be greater than the maximum size allowed by that struct.
+ */
+ if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
+ r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
+ for (i = 0; i < r->cmd_count; i++)
+ r->reports[i] = le64_to_cpu(p->msg_reports[i]);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event base_events[] = {
+ {
+ .id = SCMI_EVENT_BASE_ERROR_EVENT,
+ .max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
+ .max_report_sz = sizeof(struct scmi_base_error_report) +
+ SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
+ },
+};
+
+static const struct scmi_event_ops base_event_ops = {
+ .set_notify_enabled = scmi_base_set_notify_enabled,
+ .fill_custom_report = scmi_base_fill_custom_report,
+};
+
+static const struct scmi_protocol_events base_protocol_events = {
+ .queue_sz = 4 * SCMI_PROTO_QUEUE_SZ,
+ .ops = &base_event_ops,
+ .evts = base_events,
+ .num_events = ARRAY_SIZE(base_events),
+ .num_sources = SCMI_BASE_NUM_SOURCES,
+};
+
+static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int id, ret;
+ u8 *prot_imp;
+ u32 version;
+ char name[SCMI_SHORT_NAME_MAX_SIZE];
+ struct device *dev = ph->dev;
+ struct scmi_revision_info *rev = scmi_revision_area_get(ph);
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ rev->major_ver = PROTOCOL_REV_MAJOR(version);
+ rev->minor_ver = PROTOCOL_REV_MINOR(version);
+ ph->set_priv(ph, rev);
+
+ ret = scmi_base_attributes_get(ph);
+ if (ret)
+ return ret;
+
+ prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8),
+ GFP_KERNEL);
+ if (!prot_imp)
+ return -ENOMEM;
+
+ scmi_base_vendor_id_get(ph, false);
+ scmi_base_vendor_id_get(ph, true);
+ scmi_base_implementation_version_get(ph);
+ scmi_base_implementation_list_get(ph, prot_imp);
+
+ scmi_setup_protocol_implemented(ph, prot_imp);
+
+ dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
+ rev->major_ver, rev->minor_ver, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+ dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
+ rev->num_agents);
+
+ for (id = 0; id < rev->num_agents; id++) {
+ scmi_base_discover_agent_get(ph, id, name);
+ dev_dbg(dev, "Agent %d: %s\n", id, name);
+ }
+
+ return 0;
+}
+
+static const struct scmi_protocol scmi_base = {
+ .id = SCMI_PROTOCOL_BASE,
+ .owner = NULL,
+ .instance_init = &scmi_base_protocol_init,
+ .ops = NULL,
+ .events = &base_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base)
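Editor's note: the base protocol registers a single notification source for BASE_NOTIFY_ERRORS. Below is a hedged sketch of how a client driver could subscribe to it; it assumes the notifier registration API published by include/linux/scmi_protocol.h (handle->notify_ops->devm_event_notifier_register()) and the struct scmi_base_error_report layout declared there, neither of which is shown in this patch:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/scmi_protocol.h>

/* Sketch only: the notifier data is a struct scmi_base_error_report */
static int example_base_error_cb(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	const struct scmi_base_error_report *rep = data;

	pr_warn("SCMI base error from agent %u (fatal:%d cmd_count:%u)\n",
		rep->agent_id, rep->fatal, rep->cmd_count);

	return NOTIFY_OK;
}

static struct notifier_block example_base_error_nb = {
	.notifier_call = example_base_error_cb,
};

/* Called from a client driver probe, sdev being its struct scmi_device */
static int example_register_base_error(struct scmi_device *sdev)
{
	return sdev->handle->notify_ops->devm_event_notifier_register(sdev,
			SCMI_PROTOCOL_BASE, SCMI_EVENT_BASE_ERROR_EVENT,
			NULL, &example_base_error_nb);
}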
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
new file mode 100644
index 0000000000..c15928b8c5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol bus layer
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "common.h"
+
+BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
+EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
+
+static DEFINE_IDA(scmi_bus_id);
+
+static DEFINE_IDR(scmi_requested_devices);
+/* Protect access to scmi_requested_devices */
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+ const struct scmi_device_id *id_table;
+ struct list_head node;
+};
+
+/* Track globally the creation of SCMI SystemPower related devices */
+static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
+
+/**
+ * scmi_protocol_device_request - Helper to request a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be created.
+ *
+ * This helper lets an SCMI driver request specific devices identified by the
+ * @id_table to be created for each active SCMI instance.
+ *
+ * The requested device name MUST NOT already exist for any protocol;
+ * at first the freshly requested @id_table is annotated in the IDR table
+ * @scmi_requested_devices and then the requested device is advertised to any
+ * registered party via the @scmi_requested_devices_nh notification chain.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ unsigned int id = 0;
+ struct list_head *head, *phead = NULL;
+ struct scmi_requested_dev *rdev;
+
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
+ !IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
+ pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
+ id_table->name, id_table->protocol_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Search for the matching protocol rdev list and then search it for
+ * any existing device with the same name: fail if any duplicate is found.
+ */
+ mutex_lock(&scmi_requested_devices_mtx);
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
+ if (!phead) {
+ /* A list found registered in the IDR is never empty */
+ rdev = list_first_entry(head, struct scmi_requested_dev,
+ node);
+ if (rdev->id_table->protocol_id ==
+ id_table->protocol_id)
+ phead = head;
+ }
+ list_for_each_entry(rdev, head, node) {
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
+ pr_err("Ignoring duplicate request [%d] %s\n",
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * No duplicate found for requested id_table, so let's create a new
+ * requested device entry for this new valid request.
+ */
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
+ if (!rdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rdev->id_table = id_table;
+
+ /*
+ * Append the new requested device table descriptor to the head of the
+ * related protocol list, creating such a list head first if it is not
+ * already there.
+ */
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead) {
+ kfree(rdev);
+ ret = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(phead);
+
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
+ id_table->protocol_id,
+ id_table->protocol_id + 1, GFP_KERNEL);
+ if (ret != id_table->protocol_id) {
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
+ kfree(rdev);
+ kfree(phead);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = 0;
+ }
+ list_add(&rdev->node, phead);
+
+out:
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ if (!ret)
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_REQUEST,
+ (void *)rdev->id_table);
+
+ return ret;
+}
+
+/**
+ * scmi_protocol_device_unrequest - Helper to unrequest a device
+ *
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
+ *
+ * The unrequested device, described by the provided id_table, is at first
+ * removed from the IDR @scmi_requested_devices and then the removal is
+ * advertised to any registered party via the @scmi_requested_devices_nh
+ * notification chain.
+ */
+static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
+{
+ struct list_head *phead;
+
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
+ id_table->name, id_table->protocol_id);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ struct scmi_requested_dev *victim, *tmp;
+
+ list_for_each_entry_safe(victim, tmp, phead, node) {
+ if (!strcmp(victim->id_table->name, id_table->name)) {
+ list_del(&victim->node);
+
+ mutex_unlock(&scmi_requested_devices_mtx);
+ blocking_notifier_call_chain(&scmi_requested_devices_nh,
+ SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
+ (void *)victim->id_table);
+ kfree(victim);
+ mutex_lock(&scmi_requested_devices_mtx);
+ break;
+ }
+ }
+
+ if (list_empty(phead)) {
+ idr_remove(&scmi_requested_devices,
+ id_table->protocol_id);
+ kfree(phead);
+ }
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+}
+
+static const struct scmi_device_id *
+scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+{
+ const struct scmi_device_id *id = scmi_drv->id_table;
+
+ if (!id)
+ return NULL;
+
+ for (; id->protocol_id; id++)
+ if (id->protocol_id == scmi_dev->protocol_id) {
+ if (!id->name)
+ return id;
+ else if (!strcmp(id->name, scmi_dev->name))
+ return id;
+ }
+
+ return NULL;
+}
+
+static int scmi_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id;
+
+ id = scmi_dev_match_id(scmi_dev, scmi_drv);
+ if (id)
+ return 1;
+
+ return 0;
+}
+
+static int scmi_match_by_id_table(struct device *dev, void *data)
+{
+ struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device_id *id_table = data;
+
+ return sdev->protocol_id == id_table->protocol_id &&
+ (id_table->name && !strcmp(sdev->name, id_table->name));
+}
+
+static struct scmi_device *scmi_child_dev_find(struct device *parent,
+ int prot_id, const char *name)
+{
+ struct scmi_device_id id_table;
+ struct device *dev;
+
+ id_table.protocol_id = prot_id;
+ id_table.name = name;
+
+ dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
+ if (!dev)
+ return NULL;
+
+ return to_scmi_dev(dev);
+}
+
+static int scmi_dev_probe(struct device *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ if (!scmi_dev->handle)
+ return -EPROBE_DEFER;
+
+ return scmi_drv->probe(scmi_dev);
+}
+
+static void scmi_dev_remove(struct device *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ if (scmi_drv->remove)
+ scmi_drv->remove(scmi_dev);
+}
+
+struct bus_type scmi_bus_type = {
+ .name = "scmi_protocol",
+ .match = scmi_dev_match,
+ .probe = scmi_dev_probe,
+ .remove = scmi_dev_remove,
+};
+EXPORT_SYMBOL_GPL(scmi_bus_type);
+
+int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ int retval;
+
+ if (!driver->probe)
+ return -EINVAL;
+
+ retval = scmi_protocol_device_request(driver->id_table);
+ if (retval)
+ return retval;
+
+ driver->driver.bus = &scmi_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ retval = driver_register(&driver->driver);
+ if (!retval)
+ pr_debug("Registered new scmi driver %s\n", driver->name);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(scmi_driver_register);
+
+void scmi_driver_unregister(struct scmi_driver *driver)
+{
+ driver_unregister(&driver->driver);
+ scmi_protocol_device_unrequest(driver->id_table);
+}
+EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+
+static void scmi_device_release(struct device *dev)
+{
+ kfree(to_scmi_dev(dev));
+}
+
+static void __scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(scmi_dev->dev.parent->of_node),
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+
+ if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
+ atomic_set(&scmi_syspower_registered, 0);
+
+ kfree_const(scmi_dev->name);
+ ida_free(&scmi_bus_id, scmi_dev->id);
+ device_unregister(&scmi_dev->dev);
+}
+
+static struct scmi_device *
+__scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ int id, retval;
+ struct scmi_device *scmi_dev;
+
+ /*
+ * If the same protocol/name device already exists under the same parent
+ * (i.e. SCMI instance) just return the existing device.
+ * This avoids any race between the SCMI driver, creating devices for
+ * each DT defined protocol at probe time, and the concurrent
+ * registration of SCMI drivers.
+ */
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ return scmi_dev;
+
+ /*
+ * Ignore any possible subsequent failures while creating the device
+ * since we are doomed anyway at that point; not using a mutex which
+ * spans across this whole function to keep things simple and to avoid
+ * serializing all the __scmi_device_create calls across possibly
+ * different SCMI server instances (parent)
+ */
+ if (protocol == SCMI_PROTOCOL_SYSTEM &&
+ atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
+ dev_warn(parent,
+ "SCMI SystemPower protocol device must be unique !\n");
+ return NULL;
+ }
+
+ scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
+ if (!scmi_dev)
+ return NULL;
+
+ scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+ if (!scmi_dev->name) {
+ kfree(scmi_dev);
+ return NULL;
+ }
+
+ id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL);
+ if (id < 0) {
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
+ return NULL;
+ }
+
+ scmi_dev->id = id;
+ scmi_dev->protocol_id = protocol;
+ scmi_dev->dev.parent = parent;
+ device_set_node(&scmi_dev->dev, of_fwnode_handle(np));
+ scmi_dev->dev.bus = &scmi_bus_type;
+ scmi_dev->dev.release = scmi_device_release;
+ dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
+
+ retval = device_register(&scmi_dev->dev);
+ if (retval)
+ goto put_dev;
+
+ pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ dev_name(&scmi_dev->dev), protocol, name);
+
+ return scmi_dev;
+put_dev:
+ kfree_const(scmi_dev->name);
+ put_device(&scmi_dev->dev);
+ ida_free(&scmi_bus_id, id);
+ return NULL;
+}
+
+/**
+ * scmi_device_create - A method to create one or more SCMI devices
+ *
+ * @np: A reference to the device node to use for the new device(s)
+ * @parent: The parent device to use identifying a specific SCMI instance
+ * @protocol: The SCMI protocol to be associated with this device
+ * @name: The requested-name of the device to be created; this is optional
+ * and if no @name is provided, all the devices currently known to
+ * be requested on the SCMI bus for @protocol will be created.
+ *
+ * This method can be invoked to create a single well-defined device (like
+ * a transport device or a device requested by an SCMI driver loaded after
+ * the core SCMI stack has been probed), or to create all the devices currently
+ * known to have been requested by the loaded SCMI drivers for a specific
+ * protocol (typically during SCMI core protocol enumeration at probe time).
+ *
+ * Return: The created device (or one of them if @name was NOT provided and
+ * multiple devices were created) or NULL if no device was created;
+ * note that NULL indicates an error ONLY in case a specific @name
+ * was provided: when @name param was not provided, a number of devices
+ * could have been potentially created for a whole protocol, unless no
+ * device was found to have been requested for that specific protocol.
+ */
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name)
+{
+ struct list_head *phead;
+ struct scmi_requested_dev *rdev;
+ struct scmi_device *scmi_dev = NULL;
+
+ if (name)
+ return __scmi_device_create(np, parent, protocol, name);
+
+ mutex_lock(&scmi_requested_devices_mtx);
+ phead = idr_find(&scmi_requested_devices, protocol);
+ /* Nothing to do. */
+ if (!phead) {
+ mutex_unlock(&scmi_requested_devices_mtx);
+ return NULL;
+ }
+
+ /* Walk the list of requested devices for protocol and create them */
+ list_for_each_entry(rdev, phead, node) {
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ /* Report errors and carry on... */
+ if (sdev)
+ scmi_dev = sdev;
+ else
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node),
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
+ }
+ mutex_unlock(&scmi_requested_devices_mtx);
+
+ return scmi_dev;
+}
+EXPORT_SYMBOL_GPL(scmi_device_create);
+
+void scmi_device_destroy(struct device *parent, int protocol, const char *name)
+{
+ struct scmi_device *scmi_dev;
+
+ scmi_dev = scmi_child_dev_find(parent, protocol, name);
+ if (scmi_dev)
+ __scmi_device_destroy(scmi_dev);
+}
+EXPORT_SYMBOL_GPL(scmi_device_destroy);
+
+static int __scmi_devices_unregister(struct device *dev, void *data)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ __scmi_device_destroy(scmi_dev);
+ return 0;
+}
+
+static void scmi_devices_unregister(void)
+{
+ bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+}
+
+static int __init scmi_bus_init(void)
+{
+ int retval;
+
+ retval = bus_register(&scmi_bus_type);
+ if (retval) {
+ pr_err("SCMI protocol bus register failed (%d)\n", retval);
+ return retval;
+ }
+
+ pr_info("SCMI protocol bus registered\n");
+
+ return 0;
+}
+subsys_initcall(scmi_bus_init);
+
+static void __exit scmi_bus_exit(void)
+{
+ /*
+ * Destroy all remaining devices: just in case the drivers were
+ * manually unbound at first and then the modules unloaded.
+ */
+ scmi_devices_unregister();
+ bus_unregister(&scmi_bus_type);
+ ida_destroy(&scmi_bus_id);
+}
+module_exit(scmi_bus_exit);
+
+MODULE_ALIAS("scmi-core");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol bus");
+MODULE_LICENSE("GPL");
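Editor's note: the registration helpers above are normally reached through the module_scmi_driver() convenience macro from include/linux/scmi_protocol.h. A hedged illustration follows; the driver name is made up, and the "clocks" device name matches what mainline requests for the clock protocol but should be treated as an assumption here:

#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int example_probe(struct scmi_device *sdev)
{
	dev_info(&sdev->dev, "bound to SCMI protocol 0x%x\n", sdev->protocol_id);
	return 0;
}

static const struct scmi_device_id example_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, example_id_table);

static struct scmi_driver example_scmi_driver = {
	.name = "example-scmi-client",
	.probe = example_probe,
	.id_table = example_id_table,
};
module_scmi_driver(example_scmi_driver);

MODULE_LICENSE("GPL");

scmi_driver_register() then requests the matching device on every SCMI instance, and the bus match/probe path above binds the driver once the device's handle is ready.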
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 0000000000..96060bf90a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018-2022 ARM Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/limits.h>
+#include <linux/sort.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_clock_protocol_cmd {
+ CLOCK_ATTRIBUTES = 0x3,
+ CLOCK_DESCRIBE_RATES = 0x4,
+ CLOCK_RATE_SET = 0x5,
+ CLOCK_RATE_GET = 0x6,
+ CLOCK_CONFIG_SET = 0x7,
+ CLOCK_NAME_GET = 0x8,
+ CLOCK_RATE_NOTIFY = 0x9,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+ __le16 num_clocks;
+ u8 max_async_req;
+ u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+ __le32 attributes;
+#define CLOCK_ENABLE BIT(0)
+#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
+#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 clock_enable_latency;
+};
+
+struct scmi_clock_set_config {
+ __le32 id;
+ __le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+ __le32 id;
+ __le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+ __le32 num_rates_flags;
+#define NUM_RETURNED(x) ((x) & 0xfff)
+#define RATE_DISCRETE(x) !((x) & BIT(12))
+#define NUM_REMAINING(x) ((x) >> 16)
+ struct {
+ __le32 value_low;
+ __le32 value_high;
+ } rate[];
+#define RATE_TO_U64(X) \
+({ \
+ typeof(X) x = (X); \
+ le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
+
+struct scmi_clock_set_rate {
+ __le32 flags;
+#define CLOCK_SET_ASYNC BIT(0)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
+#define CLOCK_SET_ROUND_UP BIT(2)
+#define CLOCK_SET_ROUND_AUTO BIT(3)
+ __le32 id;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_msg_resp_set_rate_complete {
+ __le32 id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
+struct scmi_msg_clock_rate_notify {
+ __le32 clk_id;
+ __le32 notify_enable;
+};
+
+struct scmi_clock_rate_notify_payld {
+ __le32 agent_id;
+ __le32 clock_id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
+struct clock_info {
+ u32 version;
+ int num_clocks;
+ int max_async_req;
+ atomic_t cur_async_req;
+ struct scmi_clock_info *clk;
+};
+
+static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
+ CLOCK_RATE_NOTIFY,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+};
+
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct clock_info *ci)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ ci->num_clocks = le16_to_cpu(attr->num_clocks);
+ ci->max_async_req = attr->max_async_req;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, struct scmi_clock_info *clk,
+ u32 version)
+{
+ int ret;
+ u32 attributes;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+ sizeof(clk_id), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 latency = 0;
+
+ attributes = le32_to_cpu(attr->attributes);
+ strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
+ /* clock_enable_latency field is present only since SCMI v3.1 */
+ if (PROTOCOL_REV_MAJOR(version) >= 0x2)
+ latency = le32_to_cpu(attr->clock_enable_latency);
+ clk->enable_latency = latency ? : U32_MAX;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
+ if (SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
+ clk->name,
+ SCMI_MAX_STR_SIZE);
+
+ if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+ clk->rate_changed_notifications = true;
+ if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+ clk->rate_change_requested_notifications = true;
+ }
+
+ return ret;
+}
+
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
+struct scmi_clk_ipriv {
+ struct device *dev;
+ u32 clk_id;
+ struct scmi_clock_info *clk;
+};
+
+static void iter_clk_describe_prepare_message(void *message,
+ const unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_clock_describe_rates *msg = message;
+ const struct scmi_clk_ipriv *p = priv;
+
+ msg->id = cpu_to_le32(p->clk_id);
+ /* Set the number of rates to be skipped/already read */
+ msg->rate_index = cpu_to_le32(desc_index);
+}
+
+static int
+iter_clk_describe_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+ flags = le32_to_cpu(r->num_rates_flags);
+ st->num_remaining = NUM_REMAINING(flags);
+ st->num_returned = NUM_RETURNED(flags);
+ p->clk->rate_discrete = RATE_DISCRETE(flags);
+
+ /* Warn about out of spec replies ... */
+ if (!p->clk->rate_discrete &&
+ (st->num_returned != 3 || st->num_remaining != 0)) {
+ dev_warn(p->dev,
+ "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
+ p->clk->name, st->num_returned, st->num_remaining,
+ st->rx_len);
+
+ /*
+ * A known quirk: a triplet is returned but num_returned != 3.
+ * Check for a safe payload size and fix it up.
+ */
+ if (st->num_returned != 3 && st->num_remaining == 0 &&
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
+ st->num_returned = 3;
+ st->num_remaining = 0;
+ } else {
+ dev_err(p->dev,
+ "Cannot fix out-of-spec reply !\n");
+ return -EPROTO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ int ret = 0;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+ if (!p->clk->rate_discrete) {
+ switch (st->desc_index + st->loop_idx) {
+ case 0:
+ p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
+ break;
+ case 1:
+ p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
+ break;
+ case 2:
+ p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ } else {
+ u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
+
+ *rate = RATE_TO_U64(r->rate[st->loop_idx]);
+ p->clk->list.num_rates++;
+ }
+
+ return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_clk_describe_prepare_message,
+ .update_state = iter_clk_describe_update_state,
+ .process_response = iter_clk_describe_process_response,
+ };
+ struct scmi_clk_ipriv cpriv = {
+ .clk_id = clk_id,
+ .clk = clk,
+ .dev = ph->dev,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
+ CLOCK_DESCRIBE_RATES,
+ sizeof(struct scmi_msg_clock_describe_rates),
+ &cpriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
+
+ if (!clk->rate_discrete) {
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
+ clk->range.min_rate, clk->range.max_rate,
+ clk->range.step_size);
+ } else if (clk->list.num_rates) {
+ sort(clk->list.rates, clk->list.num_rates,
+ sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
+ }
+
+ return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+ sizeof(__le32), sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 rate)
+{
+ int ret;
+ u32 flags = 0;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ if (ci->max_async_req &&
+ atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
+ cfg = t->tx.buf;
+ cfg->flags = cpu_to_le32(flags);
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+ cfg->value_high = cpu_to_le32(rate >> 32);
+
+ if (flags & CLOCK_SET_ASYNC) {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_set_rate_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == clk_id)
+ dev_dbg(ph->dev,
+ "Clk ID %d set async to %llu\n", clk_id,
+ get_unaligned_le64(&resp->rate_low));
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ ret = ph->xops->do_xfer(ph, t);
+ }
+
+ if (ci->max_async_req)
+ atomic_dec(&ci->cur_async_req);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u32 config, bool atomic)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_config *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ t->hdr.poll_completion = atomic;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->attributes = cpu_to_le32(config);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
+}
+
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, false);
+}
+
+static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
+}
+
+static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, true);
+}
+
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ struct scmi_clock_info *clk;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
+ if (!clk->name[0])
+ return NULL;
+
+ return clk;
+}
+
+static const struct scmi_clk_proto_ops clk_proto_ops = {
+ .count_get = scmi_clock_count_get,
+ .info_get = scmi_clock_info_get,
+ .rate_get = scmi_clock_rate_get,
+ .rate_set = scmi_clock_rate_set,
+ .enable = scmi_clock_enable,
+ .disable = scmi_clock_disable,
+ .enable_atomic = scmi_clock_enable_atomic,
+ .disable_atomic = scmi_clock_disable_atomic,
+};
+
+static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
+ u32 clk_id, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_clock_rate_notify *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->clk_id = cpu_to_le32(clk_id);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_clock_rate_notify_payld *p = payld;
+ struct scmi_clock_rate_notif_report *r = report;
+
+ if (sizeof(*p) != payld_sz ||
+ (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
+ evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->clock_id = le32_to_cpu(p->clock_id);
+ r->rate = get_unaligned_le64(&p->rate_low);
+ *src_id = r->clock_id;
+
+ return r;
+}
+
+static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (!ci)
+ return -EINVAL;
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_event clk_events[] = {
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+};
+
+static const struct scmi_event_ops clk_event_ops = {
+ .get_num_sources = scmi_clk_get_num_sources,
+ .set_notify_enabled = scmi_clk_set_notify_enabled,
+ .fill_custom_report = scmi_clk_fill_custom_report,
+};
+
+static const struct scmi_protocol_events clk_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &clk_event_ops,
+ .evts = clk_events,
+ .num_events = ARRAY_SIZE(clk_events),
+};
+
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ u32 version;
+ int clkid, ret;
+ struct clock_info *cinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Clock Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ ret = scmi_clock_protocol_attributes_get(ph, cinfo);
+ if (ret)
+ return ret;
+
+ cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
+ sizeof(*cinfo->clk), GFP_KERNEL);
+ if (!cinfo->clk)
+ return -ENOMEM;
+
+ for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+ struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+ ret = scmi_clock_attributes_get(ph, clkid, clk, version);
+ if (!ret)
+ scmi_clock_describe_rates_get(ph, clkid, clk);
+ }
+
+ cinfo->version = version;
+ return ph->set_priv(ph, cinfo);
+}
+
+static const struct scmi_protocol scmi_clock = {
+ .id = SCMI_PROTOCOL_CLOCK,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_clock_protocol_init,
+ .ops = &clk_proto_ops,
+ .events = &clk_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
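Editor's note: client drivers consume the clk_proto_ops above after acquiring the protocol through the handle. The sketch below assumes the devm_protocol_get() and is_transport_atomic() operations published by include/linux/scmi_protocol.h (not shown in this patch); clk_id 0 is purely illustrative:

#include <linux/err.h>
#include <linux/scmi_protocol.h>

/* Sketch: pick the atomic or sleeping enable depending on the transport */
static int example_clk_setup(struct scmi_device *sdev)
{
	const struct scmi_clk_proto_ops *clk_ops;
	struct scmi_protocol_handle *ph;
	unsigned int atomic_threshold;
	int ret;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	if (sdev->handle->is_transport_atomic(sdev->handle, &atomic_threshold))
		ret = clk_ops->enable_atomic(ph, 0);
	else
		ret = clk_ops->enable(ph, 0);

	return ret;
}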
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
new file mode 100644
index 0000000000..00b165d1f5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/common.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * driver common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2018-2022 ARM Ltd.
+ */
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+#define SCMI_MAX_CHANNELS 256
+
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+static const int scmi_linux_errmap[] = {
+ /* better than switch case as long as return value is continuous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+ -EINVAL, /* SCMI_ERR_PARAMS */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
+static inline int scmi_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
+ return -EIO;
+}
+
+#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
+#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
+/*
+ * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
+ * order to minimize space and collisions, this should equal max_msg, i.e. the
+ * maximum number of in-flight messages on a specific platform, but such value
+ * is only available at runtime while kernel hashtables are statically sized:
+ * pick instead as a fixed static size the maximum number of entries that can
+ * fit the whole table into one 4k page.
+ */
+#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9
+
+/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ * protocol id, sequence id and type.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+ return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
+ FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+ FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+ hdr->type = MSG_XTRACT_TYPE(msg_hdr);
+}
+
+/*
+ * A helper macro to lookup an xfer from the @pending_xfers hashtable
+ * using the message sequence number token as a key.
+ */
+#define XFER_FIND(__ht, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ struct scmi_xfer *xfer_ = NULL; \
+ \
+ hash_for_each_possible((__ht), xfer_, node, k_) \
+ if (xfer_->hdr.seq == k_) \
+ break; \
+ xfer_; \
+})
+
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph);
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp);
+
+extern struct bus_type scmi_bus_type;
+
+#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
+#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
+extern struct blocking_notifier_head scmi_requested_devices_nh;
+
+struct scmi_device *scmi_device_create(struct device_node *np,
+ struct device *parent, int protocol,
+ const char *name);
+void scmi_device_destroy(struct device *parent, int protocol, const char *name);
+
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id);
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @id: An identifier for this channel: this matches the protocol number
+ * used to initialize this channel
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ * channel
+ * @rx_timeout_ms: The configured RX timeout in milliseconds.
+ * @handle: Pointer to SCMI entity handle
+ * @no_completion_irq: Flag to indicate that this channel has no completion
+ * interrupt mechanism for synchronous commands.
+ * This can be dynamically set by transports at run-time
+ * inside their provided .chan_setup().
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+ int id;
+ struct device *dev;
+ unsigned int rx_timeout_ms;
+ struct scmi_handle *handle;
+ bool no_completion_irq;
+ void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure representing a SCMI transport ops
+ *
+ * @link_supplier: Optional callback to add link to a supplier device
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @get_max_msg: Optional callback to provide max_msg dynamically
+ * Returns the maximum number of messages for the channel type
+ * (tx or rx) that can be pending simultaneously in the system
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+ int (*link_supplier)(struct device *dev);
+ bool (*chan_available)(struct device_node *of_node, int idx);
+ int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx);
+ int (*chan_free)(int id, void *p, void *data);
+ unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
+ int (*send_message)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer);
+ void (*fetch_response)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
+ bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @transport_init: An optional function that a transport can provide to
+ * initialize some transport-specific setup during SCMI core
+ * initialization, so ahead of SCMI core probing.
+ * @transport_exit: An optional function that a transport can provide to
+ * de-initialize some transport-specific setup during SCMI core
+ * de-initialization, so after SCMI core removal.
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
+ * be pending simultaneously in the system. May be overridden by the
+ * get_max_msg op.
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ * @force_polling: Flag to force this whole transport to use SCMI core polling
+ * mechanism instead of completion interrupts even if available.
+ * @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
+ * synchronous-command messages are atomically
+ * completed on .send_message: no need to poll
+ * actively waiting for a response.
+ * Used by core internally only when polling is
+ * selected as a waiting for reply method: i.e.
+ * if a completion irq was found use that anyway.
+ * @atomic_enabled: Flag to indicate that this transport, which is assured not
+ * to sleep anywhere on the TX path, can be used in atomic mode
+ * when requested.
+ */
+struct scmi_desc {
+ int (*transport_init)(void);
+ void (*transport_exit)(void);
+ const struct scmi_transport_ops *ops;
+ int max_rx_timeout_ms;
+ int max_msg;
+ int max_msg_size;
+ const bool force_polling;
+ const bool sync_cmds_completed_on_ret;
+ const bool atomic_enabled;
+};
+
+static inline bool is_polling_required(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return cinfo->no_completion_irq || desc->force_polling;
+}
+
+static inline bool is_transport_polling_capable(const struct scmi_desc *desc)
+{
+ return desc->ops->poll_done || desc->sync_cmds_completed_on_ret;
+}
+
+static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
+ const struct scmi_desc *desc)
+{
+ return is_polling_required(cinfo, desc) &&
+ is_transport_polling_capable(desc);
+}
+
+void scmi_xfer_raw_put(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle);
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id);
+
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms);
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
+extern const struct scmi_desc scmi_mailbox_desc;
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
+extern const struct scmi_desc scmi_smc_desc;
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+extern const struct scmi_desc scmi_virtio_desc;
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+extern const struct scmi_desc scmi_optee_desc;
+#endif
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer, struct scmi_chan_info *cinfo);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
+
+/* declarations for message passing transports */
+struct scmi_msg_payld;
+
+/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
+#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))
+
+size_t msg_response_size(struct scmi_xfer *xfer);
+size_t msg_command_size(struct scmi_xfer *xfer);
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
+u32 msg_read_header(struct scmi_msg_payld *msg);
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+ struct scmi_xfer *xfer);
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer);
+
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv);
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
+#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
new file mode 100644
index 0000000000..87383c0542
--- /dev/null
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -0,0 +1,3054 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol driver
+ *
+ * SCMI Message Protocol is used between the System Control Processor(SCP)
+ * and the Application Processors(AP). The Message Handling Unit(MHU)
+ * provides a mechanism for inter-processor communication between SCP's
+ * Cortex M3 and AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/processor.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include "common.h"
+#include "notify.h"
+
+#include "raw_mode.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
+
+static DEFINE_IDA(scmi_id);
+
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
+
+/* List of all SCMI devices active in system */
+static LIST_HEAD(scmi_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static atomic_t transfer_last_id;
+
+static struct dentry *scmi_top_dentry;
+
+/**
+ * struct scmi_xfers_info - Structure to manage transfer information
+ *
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ * @max_msg: Maximum number of messages that can be pending
+ * @free_xfers: A free list of xfers available for use. It is initialized with
+ * a number of xfers equal to the maximum allowed in-flight
+ * messages.
+ * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
+ * currently in-flight messages.
+ */
+struct scmi_xfers_info {
+ unsigned long *xfer_alloc_table;
+ spinlock_t xfer_lock;
+ int max_msg;
+ struct hlist_head free_xfers;
+ DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
+};
+
+/**
+ * struct scmi_protocol_instance - Describe an initialized protocol instance.
+ * @handle: Reference to the SCMI handle associated to this protocol instance.
+ * @proto: A reference to the protocol descriptor.
+ * @gid: A reference for per-protocol devres management.
+ * @users: A refcount to track effective users of this protocol.
+ * @priv: Reference for optional protocol private data.
+ * @ph: An embedded protocol handle that will be passed down to protocol
+ * initialization code to identify this instance.
+ *
+ * Each protocol is initialized independently once for each SCMI platform in
+ * which it is defined by the DT and implemented by the SCMI server fw.
+ */
+struct scmi_protocol_instance {
+ const struct scmi_handle *handle;
+ const struct scmi_protocol *proto;
+ void *gid;
+ refcount_t users;
+ void *priv;
+ struct scmi_protocol_handle ph;
+};
+
+#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
+
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+};
+
+/**
+ * struct scmi_info - Structure representing a SCMI instance
+ *
+ * @id: A sequence number starting from zero identifying this instance
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @version: SCMI revision information containing protocol version,
+ * implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
+ * @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols: IDR for protocols' instance descriptors initialized for
+ * this SCMI instance: populated on protocol's first attempted
+ * usage.
+ * @protocols_mtx: A mutex to protect protocols instances initialization.
+ * @protocols_imp: List of protocols implemented, currently maximum of
+ * scmi_revision_info.num_protocols elements allocated by the
+ * base protocol
+ * @active_protocols: IDR storing device_nodes for protocols actually defined
+ * in the DT and confirmed as implemented by fw.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ * to have an execution latency less than or equal to the
+ * threshold should be considered for atomic mode operation:
+ * such a decision is ultimately left up to the SCMI drivers.
+ * @notify_priv: Pointer to private data structure specific to notifications.
+ * @node: List head
+ * @users: Number of users of this instance
+ * @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
+ * @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
+ * bus
+ * @devreq_mtx: A mutex to serialize device creation for this SCMI instance
+ * @dbg: A pointer to debugfs related data (if any)
+ * @raw: An opaque reference handle used by SCMI Raw mode.
+ */
+struct scmi_info {
+ int id;
+ struct device *dev;
+ const struct scmi_desc *desc;
+ struct scmi_revision_info version;
+ struct scmi_handle handle;
+ struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
+ struct idr tx_idr;
+ struct idr rx_idr;
+ struct idr protocols;
+ /* Ensure mutual exclusive access to protocols instance array */
+ struct mutex protocols_mtx;
+ u8 *protocols_imp;
+ struct idr active_protocols;
+ unsigned int atomic_threshold;
+ void *notify_priv;
+ struct list_head node;
+ int users;
+ struct notifier_block bus_nb;
+ struct notifier_block dev_req_nb;
+ /* Serialize device creation process for this instance */
+ struct mutex devreq_mtx;
+ struct scmi_debug_info *dbg;
+ void *raw;
+};
+
+#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
+#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
+
+static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (!proto || !try_module_get(proto->owner)) {
+ pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
+ return NULL;
+ }
+
+ pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
+
+ return proto;
+}
+
+static void scmi_protocol_put(int protocol_id)
+{
+ const struct scmi_protocol *proto;
+
+ proto = idr_find(&scmi_protocols, protocol_id);
+ if (proto)
+ module_put(proto->owner);
+}
+
+int scmi_protocol_register(const struct scmi_protocol *proto)
+{
+ int ret;
+
+ if (!proto) {
+ pr_err("invalid protocol\n");
+ return -EINVAL;
+ }
+
+ if (!proto->instance_init) {
+ pr_err("missing init for protocol 0x%x\n", proto->id);
+ return -EINVAL;
+ }
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc(&scmi_protocols, (void *)proto,
+ proto->id, proto->id + 1, GFP_ATOMIC);
+ spin_unlock(&protocol_lock);
+ if (ret != proto->id) {
+ pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
+ proto->id, ret);
+ return ret;
+ }
+
+ pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(const struct scmi_protocol *proto)
+{
+ spin_lock(&protocol_lock);
+ idr_remove(&scmi_protocols, proto->id);
+ spin_unlock(&protocol_lock);
+
+ pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
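+
+/*
+ * A minimal sketch of how a protocol module is expected to use the two
+ * registration helpers above; the 0x99 protocol ID and the foo init/deinit
+ * callbacks are hypothetical and shown only to illustrate the flow:
+ *
+ *	static const struct scmi_protocol scmi_foo_protocol = {
+ *		.id = 0x99,
+ *		.owner = THIS_MODULE,
+ *		.instance_init = &scmi_foo_protocol_init,
+ *		.instance_deinit = &scmi_foo_protocol_deinit,
+ *	};
+ *
+ *	ret = scmi_protocol_register(&scmi_foo_protocol);
+ *	...
+ *	scmi_protocol_unregister(&scmi_foo_protocol);
+ */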
+
+/**
+ * scmi_create_protocol_devices - Create devices for all pending requests for
+ * this SCMI instance.
+ *
+ * @np: The device node describing the protocol
+ * @info: The SCMI instance descriptor
+ * @prot_id: The protocol ID
+ * @name: The optional name of the device to be created: if not provided this
+ * call will lead to the creation of all the devices currently requested
+ * for the specified protocol.
+ */
+static void scmi_create_protocol_devices(struct device_node *np,
+ struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ mutex_lock(&info->devreq_mtx);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (name && !sdev)
+ dev_err(info->dev,
+ "failed to create device for protocol 0x%X (%s)\n",
+ prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+static void scmi_destroy_protocol_devices(struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ mutex_lock(&info->devreq_mtx);
+ scmi_device_destroy(info->dev, prot_id, name);
+ mutex_unlock(&info->devreq_mtx);
+}
+
+void scmi_notification_instance_data_set(const struct scmi_handle *handle,
+ void *priv)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ info->notify_priv = priv;
+	/* Ensure updated protocol private data is visible */
+ smp_wmb();
+}
+
+void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ /* Ensure protocols_private_data has been updated */
+ smp_rmb();
+ return info->notify_priv;
+}
+
+/**
+ * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ *
+ * Pick the next unused monotonically increasing token and set it into
+ * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
+ * reuse of freshly completed or timed-out xfers, thus mitigating the risk
+ * of incorrect association of a late and expired xfer with a live in-flight
+ * transaction, both happening to re-use the same token identifier.
+ *
+ * Since the platform is NOT required to answer our requests in order, we
+ * should account for a few rare but possible scenarios:
+ *
+ * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
+ * using find_next_zero_bit() starting from candidate next_token bit
+ *
+ * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
+ * there are plenty of free tokens at the start, so try a second pass using
+ * find_next_zero_bit() starting from 0.
+ *
+ * X = used in-flight
+ *
+ * Normal
+ * ------
+ *
+ * |- xfer_id picked
+ * -----------+----------------------------------------------------------
+ * | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ * Out-of-order pending at start
+ * -----------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ *
+ * Out-of-order pending at end
+ * ---------------------------
+ *
+ * |- xfer_id picked, last_token fixed
+ * -----+----------------------------------------------------------------
+ * |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
+ * ----------------------------------------------------------------------
+ * ^
+ * |- next_token
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: 0 on Success or error
+ */
+static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
+{
+ unsigned long xfer_id, next_token;
+
+ /*
+ * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
+ * using the pre-allocated transfer_id as a base.
+ * Note that the global transfer_id is shared across all message types
+ * so there could be holes in the allocated set of monotonic sequence
+ * numbers, but that is going to limit the effectiveness of the
+ * mitigation only in very rare limit conditions.
+ */
+ next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
+
+ /* Pick the next available xfer_id >= next_token */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, next_token);
+ if (xfer_id == MSG_TOKEN_MAX) {
+ /*
+ * After heavily out-of-order responses, there are no free
+ * tokens ahead, but only at start of xfer_alloc_table so
+ * try again from the beginning.
+ */
+ xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+ MSG_TOKEN_MAX, 0);
+ /*
+ * Something is wrong if we got here since there can be a
+ * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
+ * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
+ */
+ if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
+ return -ENOMEM;
+ }
+
+ /* Update +/- last_token accordingly if we skipped some hole */
+ if (xfer_id != next_token)
+ atomic_add((int)(xfer_id - next_token), &transfer_last_id);
+
+ xfer->hdr.seq = (u16)xfer_id;
+
+ return 0;
+}
+
+/**
+ * scmi_xfer_token_clear - Release the token
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ */
+static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
+ struct scmi_xfer *xfer)
+{
+ clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+}
+
+/**
+ * scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper assumes that the xfer to be registered as in-flight
+ * had been built using an xfer sequence number which still corresponds to a
+ * free slot in the xfer_alloc_table.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ */
+static inline void
+scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ /* Set in-flight */
+ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ xfer->pending = true;
+}
+
+/**
+ * scmi_xfer_inflight_register - Try to register an xfer as in-flight
+ *
+ * @xfer: The xfer to register
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Note that this helper does NOT assume anything about the sequence number
+ * that was baked into the provided xfer, so it checks at first if it can
+ * be mapped to a free slot and fails with an error if another xfer with the
+ * same sequence number is currently still registered as in-flight.
+ *
+ * Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
+ * could not be mapped to a free slot in the xfer_alloc_table.
+ */
+static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
+ * in-flight on the TX channel, if possible.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: The xfer to register
+ *
+ * Return: 0 on Success, error otherwise
+ */
+int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
+}
+
+/**
+ * scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
+ * as pending in-flight
+ *
+ * @xfer: The xfer to act upon
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Return: 0 on Success or error otherwise
+ */
+static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
+ struct scmi_xfers_info *minfo)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ /* Set a new monotonic token as the xfer sequence number */
+ ret = scmi_xfer_token_set(minfo, xfer);
+ if (!ret)
+ scmi_xfer_inflight_register_unlocked(xfer, minfo);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return ret;
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Helper function which is used by various message functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * Picks an xfer from the free list @free_xfers (if any available) and performs
+ * a basic initialization.
+ *
+ * Note that, at this point, no sequence number is assigned yet to the
+ * allocated xfer, nor is it registered as a pending transaction.
+ *
+ * The successfully initialized xfer is refcounted.
+ *
+ * Context: Holds @xfer_lock while manipulating @free_xfers.
+ *
+ * Return: An initialized xfer if all went fine, else pointer error.
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
+{
+ unsigned long flags;
+ struct scmi_xfer *xfer;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (hlist_empty(&minfo->free_xfers)) {
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* grab an xfer from the free_list */
+ xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
+ hlist_del_init(&xfer->node);
+
+ /*
+	 * Allocate transfer_id early so that it can also be used as a base
+	 * for monotonic sequence number generation if needed.
+ */
+ xfer->transfer_id = atomic_inc_return(&transfer_last_id);
+
+ refcount_set(&xfer->users, 1);
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ return xfer;
+}
+
+/**
+ * scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
+ *
+ * @handle: Pointer to SCMI entity handle
+ *
+ * Note that xfer is taken from the TX channel structures.
+ *
+ * Return: A valid xfer on Success, or an error-pointer otherwise
+ */
+struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
+{
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer = scmi_xfer_get(handle, &info->tx_minfo);
+ if (!IS_ERR(xfer))
+ xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
+
+ return xfer;
+}
+
+/**
+ * scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
+ * to use for a specific protocol_id Raw transaction.
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol_id: Identifier of the protocol
+ *
+ * Note that in a regular SCMI stack, usually, a protocol has to be defined in
+ * the DT to have an associated channel and be usable; but in Raw mode any
+ * protocol in range is allowed, re-using the Base channel, so as to enable
+ * fuzzing on any protocol without the need of a fully compiled DT.
+ *
+ * Return: A reference to the channel to use, or an ERR_PTR
+ */
+struct scmi_chan_info *
+scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_chan_info *cinfo;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ cinfo = idr_find(&info->tx_idr, protocol_id);
+ if (!cinfo) {
+ if (protocol_id == SCMI_PROTOCOL_BASE)
+ return ERR_PTR(-EINVAL);
+ /* Use Base channel for protocols not defined for DT */
+ cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!cinfo)
+ return ERR_PTR(-EINVAL);
+ dev_warn_once(handle->dev,
+ "Using Base channel for protocol 0x%X\n",
+ protocol_id);
+ }
+
+ return cinfo;
+}
+
+/**
+ * __scmi_xfer_put() - Release a message
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: message that was reserved by scmi_xfer_get
+ *
+ * After refcount check, possibly release an xfer, clearing the token slot,
+ * removing xfer from @pending_xfers and putting it back into free_xfers.
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ if (refcount_dec_and_test(&xfer->users)) {
+ if (xfer->pending) {
+ scmi_xfer_token_clear(minfo, xfer);
+ hash_del(&xfer->node);
+ xfer->pending = false;
+ }
+ hlist_add_head(&xfer->node, &minfo->free_xfers);
+ }
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+/**
+ * scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: A reference to the xfer to put
+ *
+ * Note that, as with other xfer_put() handlers, the xfer is effectively
+ * released only if there are no more users on the system.
+ */
+void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
+ xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
+ return __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+/**
+ * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer_id: Token ID to lookup in @pending_xfers
+ *
+ * Refcounting is untouched.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: A valid xfer on Success or error otherwise
+ */
+static struct scmi_xfer *
+scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
+{
+ struct scmi_xfer *xfer = NULL;
+
+ if (test_bit(xfer_id, minfo->xfer_alloc_table))
+ xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
+
+ return xfer ?: ERR_PTR(-EINVAL);
+}
+
+/**
+ * scmi_msg_response_validate - Validate message type against state of related
+ * xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_type: Message type to check
+ * @xfer: A reference to the xfer to validate against @msg_type
+ *
+ * This function checks if @msg_type is congruent with the current state of
+ * a pending @xfer; if an asynchronous delayed response is received before the
+ * related synchronous response (Out-of-Order Delayed Response) the missing
+ * synchronous response is assumed to be OK and completed, carrying on with the
+ * Delayed Response: this is done to address the case in which the underlying
+ * SCMI transport can deliver such out-of-order responses.
+ *
+ * Context: Assumes to be called with xfer->lock already acquired.
+ *
+ * Return: 0 on Success, error otherwise
+ */
+static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
+ u8 msg_type,
+ struct scmi_xfer *xfer)
+{
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
+ dev_err(cinfo->dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer->hdr.seq);
+ return -EINVAL;
+ }
+
+ switch (xfer->state) {
+ case SCMI_XFER_SENT_OK:
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ /*
+ * Delayed Response expected but delivered earlier.
+ * Assume message RESPONSE was OK and skip state.
+ */
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->state = SCMI_XFER_RESP_OK;
+ complete(&xfer->done);
+ dev_warn(cinfo->dev,
+ "Received valid OoO Delayed Response for %d\n",
+ xfer->hdr.seq);
+ }
+ break;
+ case SCMI_XFER_RESP_OK:
+ if (msg_type != MSG_TYPE_DELAYED_RESP)
+ return -EINVAL;
+ break;
+ case SCMI_XFER_DRESP_OK:
+ /* No further message expected once in SCMI_XFER_DRESP_OK */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_xfer_state_update - Update xfer state
+ *
+ * @xfer: A reference to the xfer to update
+ * @msg_type: Type of message being processed.
+ *
+ * Note that this message is assumed to have been already successfully validated
+ * by @scmi_msg_response_validate(), so here we just update the state.
+ *
+ * Context: Assumes to be called on an xfer exclusively acquired using the
+ * busy flag.
+ */
+static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
+{
+ xfer->hdr.type = msg_type;
+
+ /* Unknown command types were already discarded earlier */
+ if (xfer->hdr.type == MSG_TYPE_COMMAND)
+ xfer->state = SCMI_XFER_RESP_OK;
+ else
+ xfer->state = SCMI_XFER_DRESP_OK;
+}
+
+static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
+{
+ int ret;
+
+ ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
+
+ return ret == SCMI_XFER_FREE;
+}
+
+/**
+ * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_hdr: A message header to use as lookup key
+ *
+ * When a valid xfer is found for the sequence number embedded in the provided
+ * msg_hdr, reference counting is properly updated and exclusive access to this
+ * xfer is granted till released with @scmi_xfer_command_release.
+ *
+ * Return: A valid @xfer on Success or error otherwise.
+ */
+static inline struct scmi_xfer *
+scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+ /* Are we even expecting this? */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
+ if (IS_ERR(xfer)) {
+ dev_err(cinfo->dev,
+ "Message for %d type %d is not expected!\n",
+ xfer_id, msg_type);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+ return xfer;
+ }
+ refcount_inc(&xfer->users);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
+ /*
+ * If a pending xfer was found which was also in a congruent state with
+	 * the received message, acquire exclusive access to it by setting the
+	 * busy flag.
+ * Spins only on the rare limit condition of concurrent reception of
+ * RESP and DRESP for the same xfer.
+ */
+ if (!ret) {
+ spin_until_cond(scmi_xfer_acquired(xfer));
+ scmi_xfer_state_update(xfer, msg_type);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ if (ret) {
+ dev_err(cinfo->dev,
+ "Invalid message type:%d for %d - HDR:0x%X state:%d\n",
+ msg_type, xfer_id, msg_hdr, xfer->state);
+ /* On error the refcount incremented above has to be dropped */
+ __scmi_xfer_put(minfo, xfer);
+ xfer = ERR_PTR(-EINVAL);
+ }
+
+ return xfer;
+}
+
+static inline void scmi_xfer_command_release(struct scmi_info *info,
+ struct scmi_xfer *xfer)
+{
+ atomic_set(&xfer->busy, SCMI_XFER_FREE);
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+static inline void scmi_clear_channel(struct scmi_info *info,
+ struct scmi_chan_info *cinfo)
+{
+ if (info->desc->ops->clear_channel)
+ info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_notification(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
+ ktime_t ts;
+
+ ts = ktime_get_boottime();
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
+ if (IS_ERR(xfer)) {
+ dev_err(dev, "failed to get free message slot (%ld)\n",
+ PTR_ERR(xfer));
+ scmi_clear_channel(info, cinfo);
+ return;
+ }
+
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ if (priv)
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+ xfer);
+
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "NOTI", xfer->hdr.seq,
+ xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
+
+ scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
+ xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ MSG_TYPE_NOTIFICATION);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+ scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
+ cinfo->id);
+ }
+
+ __scmi_xfer_put(minfo, xfer);
+
+ scmi_clear_channel(info, cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+
+ xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
+ if (IS_ERR(xfer)) {
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
+
+ if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+ scmi_clear_channel(info, cinfo);
+ return;
+ }
+
+ /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
+ if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
+ xfer->rx.len = info->desc->max_msg_size;
+
+ if (priv)
+ /* Ensure order between xfer->priv store and following ops */
+ smp_store_mb(xfer->priv, priv);
+ info->desc->ops->fetch_response(cinfo, xfer);
+
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id,
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
+ (!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
+ (!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.type);
+
+ if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
+ scmi_clear_channel(info, cinfo);
+ complete(xfer->async_done);
+ } else {
+ complete(&xfer->done);
+ }
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ /*
+		 * When in polling mode avoid queueing the Raw xfer on the IRQ
+		 * RX path since it will already be queued at the end of the TX
+		 * poll loop.
+ */
+ if (!xfer->hdr.poll_completion)
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
+
+ scmi_xfer_command_release(info, xfer);
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ * @priv: Transport specific private data.
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
+{
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_NOTIFICATION:
+ scmi_handle_notification(cinfo, msg_hdr, priv);
+ break;
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, msg_hdr, priv);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
+}
+
+/**
+ * xfer_put() - Release a transmit message
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: message that was reserved by xfer_get_init
+ */
+static void xfer_put(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, ktime_t stop)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+
+ /*
+ * Poll also on xfer->done so that polling can be forcibly terminated
+ * in case of out-of-order receptions of delayed responses
+ */
+ return info->desc->ops->poll_done(cinfo, xfer) ||
+ try_wait_for_completion(&xfer->done) ||
+ ktime_after(ktime_get(), stop);
+}
+
+static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, unsigned int timeout_ms)
+{
+ int ret = 0;
+
+ if (xfer->hdr.poll_completion) {
+ /*
+		 * Real polling is needed only if the transport has NOT declared
+		 * itself to support synchronous command replies.
+ */
+ if (!desc->sync_cmds_completed_on_ret) {
+ /*
+ * Poll on xfer using transport provided .poll_done();
+ * assumes no completion interrupt was available.
+ */
+ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
+
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
+ xfer, stop));
+ if (ktime_after(ktime_get(), stop)) {
+ dev_err(dev,
+ "timed out in resp(caller: %pS) - polling\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ if (!ret) {
+ unsigned long flags;
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
+
+ /*
+ * Do not fetch_response if an out-of-order delayed
+ * response is being processed.
+ */
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (xfer->state == SCMI_XFER_SENT_OK) {
+ desc->ops->fetch_response(cinfo, xfer);
+ xfer->state = SCMI_XFER_RESP_OK;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Trace polled replies. */
+ trace_scmi_msg_dump(info->id, cinfo->id,
+ xfer->hdr.protocol_id, xfer->hdr.id,
+ !SCMI_XFER_IS_RAW(xfer) ?
+ "RESP" : "resp",
+ xfer->hdr.seq, xfer->hdr.status,
+ xfer->rx.buf, xfer->rx.len);
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ struct scmi_info *info =
+ handle_to_scmi_info(cinfo->handle);
+
+ scmi_raw_message_report(info->raw, xfer,
+ SCMI_RAW_REPLY_QUEUE,
+ cinfo->id);
+ }
+ }
+ } else {
+ /* And we wait for the response. */
+ if (!wait_for_completion_timeout(&xfer->done,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_wait_for_message_response - A helper to group all the possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ *
+ * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
+ * configuration flags like xfer->hdr.poll_completion.
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ info->desc->max_rx_timeout_ms,
+ xfer->hdr.poll_completion);
+
+ return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
+ info->desc->max_rx_timeout_ms);
+}
+
+/**
+ * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
+ * reply to an xfer raw request on a specific channel for the required timeout.
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being waited for.
+ * @timeout_ms: The maximum timeout in milliseconds
+ *
+ * Return: 0 on Success, error otherwise.
+ */
+int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer,
+ unsigned int timeout_ms)
+{
+ int ret;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct device *dev = info->dev;
+
+ ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
+ if (ret)
+ dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+
+ return ret;
+}
+
+/**
+ * do_xfer() - Do one transfer
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static int do_xfer(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ int ret;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+ struct device *dev = info->dev;
+ struct scmi_chan_info *cinfo;
+
+ /* Check for polling request on custom command xfers at first */
+ if (xfer->hdr.poll_completion &&
+ !is_transport_polling_capable(info->desc)) {
+ dev_warn_once(dev,
+ "Polling mode is not supported by transport.\n");
+ return -EINVAL;
+ }
+
+ cinfo = idr_find(&info->tx_idr, pi->proto->id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, info->desc))
+ xfer->hdr.poll_completion = true;
+
+ /*
+ * Initialise protocol id now from protocol handle to avoid it being
+	 * overridden by mistake (or malice) by the protocol code mangling the
+	 * scmi_xfer structure prior to this.
+ */
+ xfer->hdr.protocol_id = pi->proto->id;
+ reinit_completion(&xfer->done);
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ /* Clear any stale status */
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->state = SCMI_XFER_SENT_OK;
+ /*
+ * Even though spinlocking is not needed here since no race is possible
+ * on xfer->state due to the monotonically increasing tokens allocation,
+ * we must anyway ensure xfer->state initialization is not re-ordered
+ * after the .send_message() to be sure that on the RX path an early
+ * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
+ */
+ smp_mb();
+
+ ret = info->desc->ops->send_message(cinfo, xfer);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to send message %d\n", ret);
+ return ret;
+ }
+
+ trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "CMND", xfer->hdr.seq,
+ xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
+
+ ret = scmi_wait_for_message_response(cinfo, xfer);
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (info->desc->ops->mark_txdone)
+ info->desc->ops->mark_txdone(cinfo, ret, xfer);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ return ret;
+}
+
+static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ xfer->rx.len = info->desc->max_msg_size;
+}
+
+/**
+ * do_xfer_with_response() - Do one transfer and wait until the delayed
+ * response is received
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Using asynchronous commands in atomic/polling mode should be avoided since
+ * it could cause long busy-waiting here, so ignore polling for the delayed
+ * response and WARN if it was requested for this command transaction since
+ * upper layers should refrain from issuing such kinds of requests.
+ *
+ * The only other option would have been to refrain from using any asynchronous
+ * command even if made available, when an atomic transport is detected, and
+ * instead forcibly use the synchronous version (something that can easily be
+ * attained at the protocol layer), but this would also have led to longer
+ * stalls of the channel for synchronous commands and possibly timeouts.
+ * (in other words there is usually a good reason if a platform provides an
+ * asynchronous version of a command and we should prefer to use it...just not
+ * when using atomic/polling mode)
+ *
+ * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
+ * return corresponding error, else if all goes well, return 0.
+ */
+static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer)
+{
+ int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ DECLARE_COMPLETION_ONSTACK(async_response);
+
+ xfer->async_done = &async_response;
+
+ /*
+ * Delayed responses should not be polled, so an async command should
+ * not have been used when requiring an atomic/poll context; WARN and
+ * perform instead a sleeping wait.
+ * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
+ */
+ WARN_ON_ONCE(xfer->hdr.poll_completion);
+
+ ret = do_xfer(ph, xfer);
+ if (!ret) {
+ if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
+ dev_err(ph->dev,
+ "timed out in delayed resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ } else if (xfer->hdr.status) {
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
+ }
+
+ xfer->async_done = NULL;
+ return ret;
+}
+
+/**
+ * xfer_get_init() - Allocate and initialise one message for transmit
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @msg_id: Message identifier
+ * @tx_size: transmit message size
+ * @rx_size: receive message size
+ * @p: pointer to the allocated and initialised message
+ *
+ * This function allocates the message using @scmi_xfer_get and
+ * initialises the header.
+ *
+ * Return: 0 if all went fine with @p pointing to message, else
+ * corresponding error.
+ */
+static int xfer_get_init(const struct scmi_protocol_handle *ph,
+ u8 msg_id, size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ struct device *dev = info->dev;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_size > info->desc->max_msg_size ||
+ tx_size > info->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_get(pi->handle, minfo);
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "failed to get free message slot(%d)\n", ret);
+ return ret;
+ }
+
+ /* Pick a sequence number and register this xfer as in-flight */
+ ret = scmi_xfer_pending_set(xfer, minfo);
+ if (ret) {
+ dev_err(pi->handle->dev,
+ "Failed to get monotonic token %d\n", ret);
+ __scmi_xfer_put(minfo, xfer);
+ return ret;
+ }
+
+ xfer->tx.len = tx_size;
+ xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+ xfer->hdr.type = MSG_TYPE_COMMAND;
+ xfer->hdr.id = msg_id;
+ xfer->hdr.poll_completion = false;
+
+ *p = xfer;
+
+ return 0;
+}
+
+/**
+ * version_get() - command to get the revision of the SCMI entity
+ *
+ * @ph: Pointer to SCMI protocol handle
+ * @version: Holds returned version of protocol.
+ *
+ * Updates the SCMI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
+{
+ int ret;
+ __le32 *rev_info;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
+ if (ret)
+ return ret;
+
+ ret = do_xfer(ph, t);
+ if (!ret) {
+ rev_info = t->rx.buf;
+ *version = le32_to_cpu(*rev_info);
+ }
+
+ xfer_put(ph, t);
+ return ret;
+}
+
+/**
+ * scmi_set_protocol_priv - Set protocol specific data at init time
+ *
+ * @ph: A reference to the protocol handle.
+ * @priv: The private data to set.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
+ void *priv)
+{
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ pi->priv = priv;
+
+ return 0;
+}
+
+/**
+ * scmi_get_protocol_priv - Get protocol specific data set at init time
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * Return: Protocol private data if any was set.
+ */
+static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->priv;
+}
+
+static const struct scmi_xfer_ops xfer_ops = {
+ .version_get = version_get,
+ .xfer_get_init = xfer_get_init,
+ .reset_rx_to_maxsz = reset_rx_to_maxsz,
+ .do_xfer = do_xfer,
+ .do_xfer_with_response = do_xfer_with_response,
+ .xfer_put = xfer_put,
+};
+
+struct scmi_msg_resp_domain_name_get {
+ __le32 flags;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+/**
+ * scmi_common_extended_name_get - Common helper to get extended resources name
+ * @ph: A protocol handle reference.
+ * @cmd_id: The specific command ID to use.
+ * @res_id: The specific resource ID to use.
+ * @name: A pointer to the preallocated area where the retrieved name will be
+ * stored as a NULL terminated string.
+ * @len: The length in bytes of the @name char array.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name,
+ size_t len)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_domain_name_get *resp;
+
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
+ sizeof(*resp), &t);
+ if (ret)
+ goto out;
+
+ put_unaligned_le32(res_id, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ strscpy(name, resp->name, len);
+
+ ph->xops->xfer_put(ph, t);
+out:
+ if (ret)
+ dev_warn(ph->dev,
+ "Failed to get extended name - id:%u (ret:%d). Using %s\n",
+ res_id, ret, name);
+ return ret;
+}
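+
+/*
+ * A sketch of how a protocol implementation might reach this helper through
+ * the helpers operations table defined further below; the command ID and the
+ * domain variable are hypothetical:
+ *
+ *	char name[SCMI_MAX_STR_SIZE];
+ *
+ *	ret = ph->hops->extended_name_get(ph, FOO_DOMAIN_NAME_GET, domain,
+ *					  name, SCMI_MAX_STR_SIZE);
+ */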
+
+/**
+ * struct scmi_iterator - Iterator descriptor
+ * @msg: A reference to the message TX buffer; filled by @prepare_message with
+ * a proper custom command payload for each multi-part command request.
+ * @resp: A reference to the response RX buffer; used by @update_state and
+ * @process_response to parse the multi-part replies.
+ * @t: A reference to the underlying xfer initialized and used transparently by
+ * the iterator internal routines.
+ * @ph: A reference to the associated protocol handle to be used.
+ * @ops: A reference to the custom provided iterator operations.
+ * @state: The current iterator state; used and updated in turn by the iterators
+ * internal routines and by the caller-provided @scmi_iterator_ops.
+ * @priv: A reference to optional private data as provided by the caller and
+ * passed back to the @scmi_iterator_ops.
+ */
+struct scmi_iterator {
+ void *msg;
+ void *resp;
+ struct scmi_xfer *t;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_ops *ops;
+ struct scmi_iterator_state state;
+ void *priv;
+};
+
+static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv)
+{
+ int ret;
+ struct scmi_iterator *i;
+
+ i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return ERR_PTR(-ENOMEM);
+
+ i->ph = ph;
+ i->ops = ops;
+ i->priv = priv;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
+ if (ret) {
+ devm_kfree(ph->dev, i);
+ return ERR_PTR(ret);
+ }
+
+ i->state.max_resources = max_resources;
+ i->msg = i->t->tx.buf;
+ i->resp = i->t->rx.buf;
+
+ return i;
+}
+
+static int scmi_iterator_run(void *iter)
+{
+ int ret = -EINVAL;
+ struct scmi_iterator_ops *iops;
+ const struct scmi_protocol_handle *ph;
+ struct scmi_iterator_state *st;
+ struct scmi_iterator *i = iter;
+
+ if (!i || !i->ops || !i->ph)
+ return ret;
+
+ iops = i->ops;
+ ph = i->ph;
+ st = &i->state;
+
+ do {
+ iops->prepare_message(i->msg, st->desc_index, i->priv);
+ ret = ph->xops->do_xfer(ph, i->t);
+ if (ret)
+ break;
+
+ st->rx_len = i->t->rx.len;
+ ret = iops->update_state(st, i->resp, i->priv);
+ if (ret)
+ break;
+
+ if (st->num_returned > st->max_resources - st->desc_index) {
+ dev_err(ph->dev,
+ "No. of resources can't exceed %d\n",
+ st->max_resources);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (st->loop_idx = 0; st->loop_idx < st->num_returned;
+ st->loop_idx++) {
+ ret = iops->process_response(ph, i->resp, st, i->priv);
+ if (ret)
+ goto out;
+ }
+
+ st->desc_index += st->num_returned;
+ ph->xops->reset_rx_to_maxsz(ph, i->t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (st->num_returned && st->num_remaining);
+
+out:
+ /* Finalize and destroy iterator */
+ ph->xops->xfer_put(ph, i->t);
+ devm_kfree(ph->dev, i);
+
+ return ret;
+}
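+
+/*
+ * A sketch of the intended calling pattern for the iterator, as seen from a
+ * protocol implementation through the helpers operations table defined below;
+ * the foo_* callbacks, message ID, message layout and resource count are
+ * hypothetical:
+ *
+ *	struct scmi_iterator_ops ops = {
+ *		.prepare_message = foo_prepare_message,
+ *		.update_state = foo_update_state,
+ *		.process_response = foo_process_response,
+ *	};
+ *	void *iter;
+ *
+ *	iter = ph->hops->iter_response_init(ph, &ops, num_domains,
+ *					    FOO_DESCRIBE_LEVELS,
+ *					    sizeof(struct foo_msg), priv);
+ *	if (IS_ERR(iter))
+ *		return PTR_ERR(iter);
+ *	return ph->hops->iter_response_run(iter);
+ */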
+
+struct scmi_msg_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+static void
+scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id, u32 valid_size,
+ u32 domain, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db = NULL;
+ struct scmi_msg_get_fc_info *info;
+ struct scmi_msg_resp_desc_fc *resp;
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ if (!p_addr) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, describe_id,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ goto err_out;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ /*
+ * Bail out on error leaving fc_info addresses zeroed; this includes
+ * the case in which the requested domain/message_id does NOT support
+ * fastchannels at all.
+ */
+ ret = ph->xops->do_xfer(ph, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (size != valid_size) {
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_xfer;
+ }
+
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
+ if (!db) {
+ ret = -ENOMEM;
+ goto err_db;
+ }
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(ph->dev, phys_addr, size);
+ if (!addr) {
+ ret = -EADDRNOTAVAIL;
+ goto err_db_mem;
+ }
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+
+ *p_db = db;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ dev_dbg(ph->dev,
+ "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
+ pi->proto->id, message_id, domain);
+
+ return;
+
+err_db_mem:
+ devm_kfree(ph->dev, db);
+
+err_db:
+ *p_addr = NULL;
+
+err_xfer:
+ ph->xops->xfer_put(ph, t);
+
+err_out:
+ dev_warn(ph->dev,
+ "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
+ pi->proto->id, message_id, domain, ret);
+}
+
+#define SCMI_PROTO_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PROTO_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PROTO_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PROTO_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PROTO_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
+static const struct scmi_proto_helpers_ops helpers_ops = {
+ .extended_name_get = scmi_common_extended_name_get,
+ .iter_response_init = scmi_iterator_init,
+ .iter_response_run = scmi_iterator_run,
+ .fastchannel_init = scmi_common_fastchannel_init,
+ .fastchannel_db_ring = scmi_common_fastchannel_db_ring,
+};
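+
+/*
+ * A sketch of how a protocol might set up and then ring a fastchannel using
+ * the helpers above; the describe/set message IDs and the limit value are
+ * hypothetical:
+ *
+ *	void __iomem *set_addr = NULL;
+ *	struct scmi_fc_db_info *set_db = NULL;
+ *
+ *	ph->hops->fastchannel_init(ph, FOO_DESCRIBE_FASTCHANNEL,
+ *				   FOO_LIMITS_SET, sizeof(u32), domain,
+ *				   &set_addr, &set_db);
+ *	if (set_addr) {
+ *		iowrite32(limit, set_addr);
+ *		ph->hops->fastchannel_db_ring(set_db);
+ *	}
+ */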
+
+/**
+ * scmi_revision_area_get - Retrieve version memory area.
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to grab the version memory area reference during SCMI Base protocol
+ * initialization.
+ *
+ * Return: A reference to the version memory area associated to the SCMI
+ * instance underlying this protocol handle.
+ */
+struct scmi_revision_info *
+scmi_revision_area_get(const struct scmi_protocol_handle *ph)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ return pi->handle->version;
+}
+
+/**
+ * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
+ * instance descriptor.
+ * @info: The reference to the related SCMI instance.
+ * @proto: The protocol descriptor.
+ *
+ * Allocate a new protocol instance descriptor, using the provided @proto
+ * description, against the specified SCMI instance @info, and initialize it;
+ * all resources management is handled via a dedicated per-protocol devres
+ * group.
+ *
+ * Context: Assumes to be called with @protocols_mtx already acquired.
+ * Return: A reference to a freshly allocated and initialized protocol instance
+ * or ERR_PTR on failure. On failure the @proto reference is at first
+ * put using @scmi_protocol_put() before releasing the whole devres group.
+ */
+static struct scmi_protocol_instance *
+scmi_alloc_init_protocol_instance(struct scmi_info *info,
+ const struct scmi_protocol *proto)
+{
+ int ret = -ENOMEM;
+ void *gid;
+ struct scmi_protocol_instance *pi;
+ const struct scmi_handle *handle = &info->handle;
+
+ /* Protocol specific devres group */
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid) {
+ scmi_protocol_put(proto->id);
+ goto out;
+ }
+
+ pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto clean;
+
+ pi->gid = gid;
+ pi->proto = proto;
+ pi->handle = handle;
+ pi->ph.dev = handle->dev;
+ pi->ph.xops = &xfer_ops;
+ pi->ph.hops = &helpers_ops;
+ pi->ph.set_priv = scmi_set_protocol_priv;
+ pi->ph.get_priv = scmi_get_protocol_priv;
+ refcount_set(&pi->users, 1);
+	/* proto->instance_init is assured NON NULL by scmi_protocol_register */
+ ret = pi->proto->instance_init(&pi->ph);
+ if (ret)
+ goto clean;
+
+ ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
+ GFP_KERNEL);
+ if (ret != proto->id)
+ goto clean;
+
+ /*
+ * Warn but ignore events registration errors since we do not want
+ * to skip whole protocols if their notifications are messed up.
+ */
+ if (pi->proto->events) {
+ ret = scmi_register_protocol_events(handle, pi->proto->id,
+ &pi->ph,
+ pi->proto->events);
+ if (ret)
+ dev_warn(handle->dev,
+ "Protocol:%X - Events Registration Failed - err:%d\n",
+ pi->proto->id, ret);
+ }
+
+ devres_close_group(handle->dev, pi->gid);
+ dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
+
+ return pi;
+
+clean:
+ /* Take care to put the protocol module's owner before releasing all */
+ scmi_protocol_put(proto->id);
+ devres_release_group(handle->dev, gid);
+out:
+ return ERR_PTR(ret);
+}
+
+/**
+ * scmi_get_protocol_instance - Protocol initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * In case the required protocol has never been requested before for this
+ * instance, allocate and initialize all the needed structures while handling
+ * resource allocation with a dedicated per-protocol devres subgroup.
+ *
+ * Return: A reference to an initialized protocol instance or error on failure:
+ * in particular returns -EPROBE_DEFER when the desired protocol could
+ * NOT be found.
+ */
+static struct scmi_protocol_instance * __must_check
+scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+
+ if (pi) {
+ refcount_inc(&pi->users);
+ } else {
+ const struct scmi_protocol *proto;
+
+ /* Fails if protocol not registered on bus */
+ proto = scmi_protocol_get(protocol_id);
+ if (proto)
+ pi = scmi_alloc_init_protocol_instance(info, proto);
+ else
+ pi = ERR_PTR(-EPROBE_DEFER);
+ }
+ mutex_unlock(&info->protocols_mtx);
+
+ return pi;
+}
+
+/**
+ * scmi_protocol_acquire - Protocol acquire
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Register a new user for the requested protocol on the specified SCMI
+ * platform instance, possibly triggering its initialization on first user.
+ *
+ * Return: 0 if protocol was acquired successfully.
+ */
+int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
+{
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
+}
+
+/**
+ * scmi_protocol_release - Protocol de-initialization helper.
+ * @handle: A reference to the SCMI platform instance.
+ * @protocol_id: The protocol being requested.
+ *
+ * Remove one user for the specified protocol and trigger de-initialization
+ * and resource de-allocation once the last user has gone.
+ */
+void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_protocol_instance *pi;
+
+ mutex_lock(&info->protocols_mtx);
+ pi = idr_find(&info->protocols, protocol_id);
+ if (WARN_ON(!pi))
+ goto out;
+
+ if (refcount_dec_and_test(&pi->users)) {
+ void *gid = pi->gid;
+
+ if (pi->proto->events)
+ scmi_deregister_protocol_events(handle, protocol_id);
+
+ if (pi->proto->instance_deinit)
+ pi->proto->instance_deinit(&pi->ph);
+
+ idr_remove(&info->protocols, protocol_id);
+
+ scmi_protocol_put(protocol_id);
+
+ devres_release_group(handle->dev, gid);
+ dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
+ protocol_id);
+ }
+
+out:
+ mutex_unlock(&info->protocols_mtx);
+}
+
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
+ u8 *prot_imp)
+{
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+ info->protocols_imp = prot_imp;
+}
+
+static bool
+scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
+{
+ int i;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_revision_info *rev = handle->version;
+
+ if (!info->protocols_imp)
+ return false;
+
+ for (i = 0; i < rev->num_protocols; i++)
+ if (info->protocols_imp[i] == prot_id)
+ return true;
+ return false;
+}
+
+struct scmi_protocol_devres {
+ const struct scmi_handle *handle;
+ u8 protocol_id;
+};
+
+static void scmi_devm_release_protocol(struct device *dev, void *res)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ scmi_protocol_release(dres->handle, dres->protocol_id);
+}
+
+static struct scmi_protocol_instance __must_check *
+scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+ struct scmi_protocol_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_protocol,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return ERR_PTR(-ENOMEM);
+
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
+ if (IS_ERR(pi)) {
+ devres_free(dres);
+ return pi;
+ }
+
+ dres->handle = sdev->handle;
+ dres->protocol_id = protocol_id;
+ devres_add(&sdev->dev, dres);
+
+ return pi;
+}
+
+/**
+ * scmi_devm_protocol_get - Devres managed get protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ * @ph: A pointer reference used to pass back the associated protocol handle.
+ *
+ * Get hold of a protocol accounting for its usage, possibly triggering its
+ * initialization, and returning the protocol specific operations and related
+ * protocol handle which will be used as first argument in most of the
+ * protocols operations methods.
+ * Being a devres based managed method, protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: A reference to the requested protocol operations or error.
+ * Must be checked for errors by caller.
+ */
+static const void __must_check *
+scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
+ struct scmi_protocol_handle **ph)
+{
+ struct scmi_protocol_instance *pi;
+
+ if (!ph)
+ return ERR_PTR(-EINVAL);
+
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
+ return pi;
+
+ *ph = &pi->ph;
+
+ return pi->proto->ops;
+}
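+
+/*
+ * Illustrative sketch only (not part of this file): an SCMI client driver
+ * would typically reach the devres managed scmi_devm_protocol_get() above
+ * through the handle published on its scmi_device, e.g. from its probe
+ * routine. Names below are hypothetical and the ops type depends on the
+ * protocol actually requested:
+ *
+ *	static int scmi_clocks_probe(struct scmi_device *sdev)
+ *	{
+ *		struct scmi_protocol_handle *ph;
+ *		const struct scmi_clk_proto_ops *clk_ops;
+ *
+ *		clk_ops = sdev->handle->devm_protocol_get(sdev,
+ *						SCMI_PROTOCOL_CLOCK, &ph);
+ *		if (IS_ERR(clk_ops))
+ *			return PTR_ERR(clk_ops);
+ *
+ *		// clk_ops and ph stay valid until the driver is unbound.
+ *		return 0;
+ *	}
+ */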
+
+/**
+ * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Get hold of a protocol accounting for its usage, possibly triggering its
+ * initialization but without getting access to its protocol specific operations
+ * and handle.
+ *
+ * Being a devres based managed method, protocol hold will be automatically
+ * released, and possibly de-initialized on last user, once the SCMI driver
+ * owning the scmi_device is unbound from it.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
+ u8 protocol_id)
+{
+ struct scmi_protocol_instance *pi;
+
+ pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
+ if (IS_ERR(pi))
+ return PTR_ERR(pi);
+
+ return 0;
+}
+
+static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_protocol_devres *dres = res;
+
+ if (WARN_ON(!dres || !data))
+ return 0;
+
+ return dres->protocol_id == *((u8 *)data);
+}
+
+/**
+ * scmi_devm_protocol_put - Devres managed put protocol operations and handle
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @protocol_id: The protocol being requested.
+ *
+ * Explicitly release a protocol hold previously obtained by calling
+ * scmi_devm_protocol_get() above.
+ */
+static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
+{
+ int ret;
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
+ scmi_devm_protocol_match, &protocol_id);
+ WARN_ON(ret);
+}
+
+/**
+ * scmi_is_transport_atomic - Method to check if underlying transport for an
+ * SCMI instance is configured as atomic.
+ *
+ * @handle: A reference to the SCMI platform instance.
+ * @atomic_threshold: An optional return value for the system wide currently
+ * configured threshold for atomic operations.
+ *
+ * Return: True if transport is configured as atomic
+ */
+static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold)
+{
+ bool ret;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ ret = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
+ if (ret && atomic_threshold)
+ *atomic_threshold = info->atomic_threshold;
+
+ return ret;
+}
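+
+/*
+ * Illustrative sketch only: an SCMI driver wanting to issue commands from
+ * atomic context would typically query this through the handle (variable
+ * names below are hypothetical):
+ *
+ *	unsigned int threshold_us;
+ *
+ *	if (handle->is_transport_atomic(handle, &threshold_us)) {
+ *		// Transactions expected to complete within threshold_us
+ *		// may be issued without sleeping on this instance.
+ *	}
+ */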
+
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the SCMI protocol library.
+ * scmi_handle_put() must be balanced with a successful scmi_handle_get().
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+static struct scmi_handle *scmi_handle_get(struct device *dev)
+{
+ struct list_head *p;
+ struct scmi_info *info;
+ struct scmi_handle *handle = NULL;
+
+ mutex_lock(&scmi_list_mutex);
+ list_for_each(p, &scmi_list) {
+ info = list_entry(p, struct scmi_info, node);
+ if (dev->parent == info->dev) {
+ info->users++;
+ handle = &info->handle;
+ break;
+ }
+ }
+ mutex_unlock(&scmi_list_mutex);
+
+ return handle;
+}
+
+/**
+ * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ *
+ * @handle: handle acquired by scmi_handle_get
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the SCMI protocol library.
+ * scmi_handle_put() must be balanced with a successful scmi_handle_get().
+ *
+ * Return: 0 if successfully released, -EINVAL if a NULL handle was passed.
+ */
+static int scmi_handle_put(const struct scmi_handle *handle)
+{
+ struct scmi_info *info;
+
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_scmi_info(handle);
+ mutex_lock(&scmi_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&scmi_list_mutex);
+
+ return 0;
+}
+
+static void scmi_device_link_add(struct device *consumer,
+ struct device *supplier)
+{
+ struct device_link *link;
+
+ link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ WARN_ON(!link);
+}
+
+static void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+ if (scmi_dev->handle)
+ scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
+}
+
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
+{
+ int i;
+ struct scmi_xfer *xfer;
+ struct device *dev = sinfo->dev;
+ const struct scmi_desc *desc = sinfo->desc;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+ if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
+ dev_err(dev,
+ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ info->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+
+ hash_init(info->pending_xfers);
+
+ /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
+ info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+ GFP_KERNEL);
+ if (!info->xfer_alloc_table)
+ return -ENOMEM;
+
+ /*
+ * Preallocate a number of xfers equal to max inflight messages,
+ * pre-initialize the buffer pointer to pre-allocated buffers and
+ * attach all of them to the free list
+ */
+ INIT_HLIST_HEAD(&info->free_xfers);
+ for (i = 0; i < info->max_msg; i++) {
+ xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
+ GFP_KERNEL);
+ if (!xfer->rx.buf)
+ return -ENOMEM;
+
+ xfer->tx.buf = xfer->rx.buf;
+ init_completion(&xfer->done);
+ spin_lock_init(&xfer->lock);
+
+ /* Add initialized xfer to the free list */
+ hlist_add_head(&xfer->node, &info->free_xfers);
+ }
+
+ spin_lock_init(&info->xfer_lock);
+
+ return 0;
+}
+
+static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
+{
+ const struct scmi_desc *desc = sinfo->desc;
+
+ if (!desc->ops->get_max_msg) {
+ sinfo->tx_minfo.max_msg = desc->max_msg;
+ sinfo->rx_minfo.max_msg = desc->max_msg;
+ } else {
+ struct scmi_chan_info *base_cinfo;
+
+ base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
+ if (!base_cinfo)
+ return -EINVAL;
+ sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
+
+ /* RX channel is optional so can be skipped */
+ base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
+ if (base_cinfo)
+ sinfo->rx_minfo.max_msg =
+ desc->ops->get_max_msg(base_cinfo);
+ }
+
+ return 0;
+}
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret;
+
+ ret = scmi_channels_max_msg_configure(sinfo);
+ if (ret)
+ return ret;
+
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+ if (!ret && !idr_is_empty(&sinfo->rx_idr))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
+static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id, bool tx)
+{
+ int ret, idx;
+ char name[32];
+ struct scmi_chan_info *cinfo;
+ struct idr *idr;
+ struct scmi_device *tdev = NULL;
+
+ /* Transmit channel is first entry i.e. index 0 */
+ idx = tx ? 0 : 1;
+ idr = tx ? &info->tx_idr : &info->rx_idr;
+
+ if (!info->desc->ops->chan_available(of_node, idx)) {
+ cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+ if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+ return -EINVAL;
+ goto idr_alloc;
+ }
+
+ cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+
+ /* Create a unique name for this transport device */
+ snprintf(name, 32, "__scmi_transport_device_%s_%02X",
+ idx ? "rx" : "tx", prot_id);
+ /* Create a uniquely named, dedicated transport device for this chan */
+ tdev = scmi_device_create(of_node, info->dev, prot_id, name);
+ if (!tdev) {
+ dev_err(info->dev,
+ "failed to create transport device (%s)\n", name);
+ devm_kfree(info->dev, cinfo);
+ return -EINVAL;
+ }
+ of_node_get(of_node);
+
+ cinfo->id = prot_id;
+ cinfo->dev = &tdev->dev;
+ ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+ if (ret) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ return ret;
+ }
+
+ if (tx && is_polling_required(cinfo, info->desc)) {
+ if (is_transport_polling_capable(info->desc))
+ dev_info(&tdev->dev,
+ "Enabled polling mode TX channel - prot_id:%d\n",
+ prot_id);
+ else
+ dev_warn(&tdev->dev,
+ "Polling mode NOT supported by transport.\n");
+ }
+
+idr_alloc:
+ ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+ if (ret != prot_id) {
+ dev_err(info->dev,
+ "unable to allocate SCMI idr slot err %d\n", ret);
+ /* Destroy channel and device only if created by this call. */
+ if (tdev) {
+ of_node_put(of_node);
+ scmi_device_destroy(info->dev, prot_id, name);
+ devm_kfree(info->dev, cinfo);
+ }
+ return ret;
+ }
+
+ cinfo->handle = &info->handle;
+ return 0;
+}
+
+static inline int
+scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
+ int prot_id)
+{
+ int ret = scmi_chan_setup(info, of_node, prot_id, true);
+
+ if (!ret) {
+ /* Rx is optional, report only memory errors */
+ ret = scmi_chan_setup(info, of_node, prot_id, false);
+ if (ret && ret != -ENOMEM)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_channels_setup - Helper to initialize all required channels
+ *
+ * @info: The SCMI instance descriptor.
+ *
+ * Initialize all the channels described in the DT against the underlying
+ * configured transport, using custom defined dedicated devices instead of
+ * borrowing devices from the SCMI drivers; this way channels are initialized
+ * upfront during core SCMI stack probing and are no longer coupled with the
+ * SCMI devices used by SCMI drivers.
+ *
+ * Note that, even though a pair of TX/RX channels is associated with each
+ * protocol defined in the DT, a distinct freshly initialized channel is
+ * created only if the DT node for the protocol at hand describes a dedicated
+ * channel: in all the other cases the common BASE protocol channel is reused.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_channels_setup(struct scmi_info *info)
+{
+ int ret;
+ struct device_node *child, *top_np = info->dev->of_node;
+
+ /* Initialize a common generic channel at first */
+ ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
+
+ for_each_available_child_of_node(top_np, child) {
+ u32 prot_id;
+
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(info->dev,
+ "Out of range protocol %d\n", prot_id);
+
+ ret = scmi_txrx_setup(info, child, prot_id);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ return 0;
+}
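+
+/*
+ * As a purely illustrative example, the DT layout parsed above roughly looks
+ * like the following fragment, where each protocol child node carries its
+ * protocol ID in 'reg' and may optionally describe its own dedicated channels
+ * (phandles and values below are placeholders):
+ *
+ *	scmi {
+ *		compatible = "arm,scmi";
+ *		mboxes = <&mhu 0>, <&mhu 1>;
+ *		shmem = <&scmi_tx_shmem>, <&scmi_rx_shmem>;
+ *
+ *		protocol@13 {
+ *			reg = <0x13>;
+ *		};
+ *	};
+ */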
+
+static int scmi_chan_destroy(int id, void *p, void *idr)
+{
+ struct scmi_chan_info *cinfo = p;
+
+ if (cinfo->dev) {
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
+
+ of_node_put(cinfo->dev->of_node);
+ scmi_device_destroy(info->dev, id, sdev->name);
+ cinfo->dev = NULL;
+ }
+
+ idr_remove(idr, id);
+
+ return 0;
+}
+
+static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
+{
+ /* At first free all channels at the transport layer ... */
+ idr_for_each(idr, info->desc->ops->chan_free, idr);
+
+ /* ...then destroy all underlying devices */
+ idr_for_each(idr, scmi_chan_destroy, idr);
+
+ idr_destroy(idr);
+}
+
+static void scmi_cleanup_txrx_channels(struct scmi_info *info)
+{
+ scmi_cleanup_channels(info, &info->tx_idr);
+
+ scmi_cleanup_channels(info, &info->rx_idr);
+}
+
+static int scmi_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct scmi_info *info = bus_nb_to_scmi_info(nb);
+ struct scmi_device *sdev = to_scmi_dev(data);
+
+ /* Skip transport devices and devices of different SCMI instances */
+ if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
+ sdev->dev.parent != info->dev)
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ scmi_handle_put(sdev->handle);
+ sdev->handle = NULL;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
+ sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
+ "about to be BOUND." : "UNBOUND.");
+
+ return NOTIFY_OK;
+}
+
+static int scmi_device_request_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device_node *np;
+ struct scmi_device_id *id_table = data;
+ struct scmi_info *info = req_nb_to_scmi_info(nb);
+
+ np = idr_find(&info->active_protocols, id_table->protocol_id);
+ if (!np)
+ return NOTIFY_DONE;
+
+ dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
+ action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
+ id_table->name, id_table->protocol_id);
+
+ switch (action) {
+ case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
+ scmi_create_protocol_devices(np, info, id_table->protocol_id,
+ id_table->name);
+ break;
+ case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
+ scmi_destroy_protocol_devices(info, id_table->protocol_id,
+ id_table->name);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void scmi_debugfs_common_cleanup(void *d)
+{
+ struct scmi_debug_info *dbg = d;
+
+ if (!dbg)
+ return;
+
+ debugfs_remove_recursive(dbg->top_dentry);
+ kfree(dbg->name);
+ kfree(dbg->type);
+}
+
+static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
+{
+ char top_dir[16];
+ struct dentry *trans, *top_dentry;
+ struct scmi_debug_info *dbg;
+ const char *c_ptr = NULL;
+
+ dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return NULL;
+
+ dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
+ if (!dbg->name) {
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
+
+ of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
+ dbg->type = kstrdup(c_ptr, GFP_KERNEL);
+ if (!dbg->type) {
+ kfree(dbg->name);
+ devm_kfree(info->dev, dbg);
+ return NULL;
+ }
+
+ snprintf(top_dir, 16, "%d", info->id);
+ top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
+ trans = debugfs_create_dir("transport", top_dentry);
+
+ dbg->is_atomic = info->desc->atomic_enabled &&
+ is_transport_polling_capable(info->desc);
+
+ debugfs_create_str("instance_name", 0400, top_dentry,
+ (char **)&dbg->name);
+
+ debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
+ &info->atomic_threshold);
+
+ debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
+
+ debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
+
+ debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
+ (u32 *)&info->desc->max_rx_timeout_ms);
+
+ debugfs_create_u32("max_msg_size", 0400, trans,
+ (u32 *)&info->desc->max_msg_size);
+
+ debugfs_create_u32("tx_max_msg", 0400, trans,
+ (u32 *)&info->tx_minfo.max_msg);
+
+ debugfs_create_u32("rx_max_msg", 0400, trans,
+ (u32 *)&info->rx_minfo.max_msg);
+
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+ scmi_debugfs_common_cleanup, dbg)) {
+ scmi_debugfs_common_cleanup(dbg);
+ return NULL;
+ }
+
+ return dbg;
+}
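+
+/*
+ * For reference, assuming debugfs is mounted at /sys/kernel/debug, the layout
+ * created above for instance N is roughly:
+ *
+ *	/sys/kernel/debug/scmi/N/instance_name
+ *	/sys/kernel/debug/scmi/N/atomic_threshold_us
+ *	/sys/kernel/debug/scmi/N/transport/{type,is_atomic,max_rx_timeout_ms,
+ *					    max_msg_size,tx_max_msg,rx_max_msg}
+ *
+ * The "scmi" top directory itself is created later in scmi_debugfs_init().
+ */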
+
+static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
+{
+ int id, num_chans = 0, ret = 0;
+ struct scmi_chan_info *cinfo;
+ u8 channels[SCMI_MAX_CHANNELS] = {};
+ DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
+
+ if (!info->dbg)
+ return -EINVAL;
+
+ /* Enumerate all channels to collect their ids */
+ idr_for_each_entry(&info->tx_idr, cinfo, id) {
+ /*
+ * Cannot happen, but be defensive.
+ * Zero as num_chans is ok, warn and carry on.
+ */
+ if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
+ dev_warn(info->dev,
+ "SCMI RAW - Error enumerating channels\n");
+ break;
+ }
+
+ if (!test_bit(cinfo->id, protos)) {
+ channels[num_chans++] = cinfo->id;
+ set_bit(cinfo->id, protos);
+ }
+ }
+
+ info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
+ info->id, channels, num_chans,
+ info->desc, info->tx_minfo.max_msg);
+ if (IS_ERR(info->raw)) {
+ dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
+ ret = PTR_ERR(info->raw);
+ info->raw = NULL;
+ }
+
+ return ret;
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ struct scmi_info *info;
+ bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
+ if (info->id < 0)
+ return info->id;
+
+ info->dev = dev;
+ info->desc = desc;
+ info->bus_nb.notifier_call = scmi_bus_notifier;
+ info->dev_req_nb.notifier_call = scmi_device_request_notifier;
+ INIT_LIST_HEAD(&info->node);
+ idr_init(&info->protocols);
+ mutex_init(&info->protocols_mtx);
+ idr_init(&info->active_protocols);
+ mutex_init(&info->devreq_mtx);
+
+ platform_set_drvdata(pdev, info);
+ idr_init(&info->tx_idr);
+ idr_init(&info->rx_idr);
+
+ handle = &info->handle;
+ handle->dev = info->dev;
+ handle->version = &info->version;
+ handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
+ handle->devm_protocol_get = scmi_devm_protocol_get;
+ handle->devm_protocol_put = scmi_devm_protocol_put;
+
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(np, "atomic-threshold-us",
+ &info->atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %d us\n",
+ info->atomic_threshold);
+ handle->is_transport_atomic = scmi_is_transport_atomic;
+
+ if (desc->ops->link_supplier) {
+ ret = desc->ops->link_supplier(dev);
+ if (ret)
+ goto clear_ida;
+ }
+
+ /* Setup all channels described in the DT at first */
+ ret = scmi_channels_setup(info);
+ if (ret)
+ goto clear_ida;
+
+ ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
+ if (ret)
+ goto clear_txrx_setup;
+
+ ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ if (ret)
+ goto clear_bus_notifier;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ if (scmi_top_dentry) {
+ info->dbg = scmi_debugfs_common_setup(info);
+ if (!info->dbg)
+ dev_warn(dev, "Failed to setup SCMI debugfs.\n");
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ ret = scmi_debugfs_raw_mode_setup(info);
+ if (!coex) {
+ if (ret)
+ goto clear_dev_req_notifier;
+
+ /* Bail out anyway when coex disabled. */
+ return 0;
+ }
+
+ /* Coex enabled, carry on in any case. */
+ dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
+ }
+ }
+
+ if (scmi_notification_init(handle))
+ dev_err(dev, "SCMI Notifications NOT available.\n");
+
+ if (info->desc->atomic_enabled &&
+ !is_transport_polling_capable(info->desc))
+ dev_err(dev,
+ "Transport is not polling capable. Atomic mode not supported.\n");
+
+ /*
+ * Trigger SCMI Base protocol initialization.
+ * It's mandatory and is never released/de-initialized until the
+ * SCMI stack is shut down/unloaded as a whole.
+ */
+ ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
+ if (ret) {
+ dev_err(dev, "unable to communicate with SCMI\n");
+ if (coex)
+ return 0;
+ goto notification_exit;
+ }
+
+ mutex_lock(&scmi_list_mutex);
+ list_add_tail(&info->node, &scmi_list);
+ mutex_unlock(&scmi_list_mutex);
+
+ for_each_available_child_of_node(np, child) {
+ u32 prot_id;
+
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+ if (!scmi_is_protocol_implemented(handle, prot_id)) {
+ dev_err(dev, "SCMI protocol %d not implemented\n",
+ prot_id);
+ continue;
+ }
+
+ /*
+ * Save this valid DT protocol descriptor amongst
+ * @active_protocols for this SCMI instance.
+ */
+ ret = idr_alloc(&info->active_protocols, child,
+ prot_id, prot_id + 1, GFP_KERNEL);
+ if (ret != prot_id) {
+ dev_err(dev, "SCMI protocol %d already activated. Skip\n",
+ prot_id);
+ continue;
+ }
+
+ of_node_get(child);
+ scmi_create_protocol_devices(child, info, prot_id, NULL);
+ }
+
+ return 0;
+
+notification_exit:
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
+ scmi_notification_exit(&info->handle);
+clear_dev_req_notifier:
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+clear_bus_notifier:
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+clear_txrx_setup:
+ scmi_cleanup_txrx_channels(info);
+clear_ida:
+ ida_free(&scmi_id, info->id);
+ return ret;
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+ int id;
+ struct scmi_info *info = platform_get_drvdata(pdev);
+ struct device_node *child;
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
+ scmi_raw_mode_cleanup(info->raw);
+
+ mutex_lock(&scmi_list_mutex);
+ if (info->users)
+ dev_warn(&pdev->dev,
+ "Still active SCMI users will be forcibly unbound.\n");
+ list_del(&info->node);
+ mutex_unlock(&scmi_list_mutex);
+
+ scmi_notification_exit(&info->handle);
+
+ mutex_lock(&info->protocols_mtx);
+ idr_destroy(&info->protocols);
+ mutex_unlock(&info->protocols_mtx);
+
+ idr_for_each_entry(&info->active_protocols, child, id)
+ of_node_put(child);
+ idr_destroy(&info->active_protocols);
+
+ blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
+ &info->dev_req_nb);
+ bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
+
+ /* Safe to free channels since no more users */
+ scmi_cleanup_txrx_channels(info);
+
+ ida_free(&scmi_id, info->id);
+
+ return 0;
+}
+
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u.%u\n", info->version.major_ver,
+ info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_sub_vendor_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
+/* Each compatible listed below must have a descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
+ { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
+ { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+ { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+ { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
+#endif
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
+static struct platform_driver scmi_driver = {
+ .driver = {
+ .name = "arm-scmi",
+ .suppress_bind_attrs = true,
+ .of_match_table = scmi_of_match,
+ .dev_groups = versions_groups,
+ },
+ .probe = scmi_probe,
+ .remove = scmi_remove,
+};
+
+/**
+ * __scmi_transports_setup - Common helper to call transport-specific
+ * .init/.exit code if provided.
+ *
+ * @init: A flag to distinguish between init and exit.
+ *
+ * Note that, if provided, we invoke .init/.exit functions for all the
+ * transports currently compiled in.
+ *
+ * Return: 0 on Success.
+ */
+static inline int __scmi_transports_setup(bool init)
+{
+ int ret = 0;
+ const struct of_device_id *trans;
+
+ for (trans = scmi_of_match; trans->data; trans++) {
+ const struct scmi_desc *tdesc = trans->data;
+
+ if ((init && !tdesc->transport_init) ||
+ (!init && !tdesc->transport_exit))
+ continue;
+
+ if (init)
+ ret = tdesc->transport_init();
+ else
+ tdesc->transport_exit();
+
+ if (ret) {
+ pr_err("SCMI transport %s FAILED initialization!\n",
+ trans->compatible);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int __init scmi_transports_init(void)
+{
+ return __scmi_transports_setup(true);
+}
+
+static void __exit scmi_transports_exit(void)
+{
+ __scmi_transports_setup(false);
+}
+
+static struct dentry *scmi_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("scmi", NULL);
+ if (IS_ERR(d)) {
+ pr_err("Could NOT create SCMI top dentry.\n");
+ return NULL;
+ }
+
+ return d;
+}
+
+static int __init scmi_driver_init(void)
+{
+ int ret;
+
+ /* Bail out if no SCMI transport was configured */
+ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
+ return -EINVAL;
+
+ /* Initialize any compiled-in transport which provided an init/exit */
+ ret = scmi_transports_init();
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
+ scmi_top_dentry = scmi_debugfs_init();
+
+ scmi_base_register();
+
+ scmi_clock_register();
+ scmi_perf_register();
+ scmi_power_register();
+ scmi_reset_register();
+ scmi_sensors_register();
+ scmi_voltage_register();
+ scmi_system_register();
+ scmi_powercap_register();
+
+ return platform_driver_register(&scmi_driver);
+}
+module_init(scmi_driver_init);
+
+static void __exit scmi_driver_exit(void)
+{
+ scmi_base_unregister();
+
+ scmi_clock_unregister();
+ scmi_perf_unregister();
+ scmi_power_unregister();
+ scmi_reset_unregister();
+ scmi_sensors_unregister();
+ scmi_voltage_unregister();
+ scmi_system_unregister();
+ scmi_powercap_unregister();
+
+ scmi_transports_exit();
+
+ platform_driver_unregister(&scmi_driver);
+
+ debugfs_remove_recursive(scmi_top_dentry);
+}
+module_exit(scmi_driver_exit);
+
+MODULE_ALIAS("platform:arm-scmi");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
new file mode 100644
index 0000000000..b8d470417e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Mailbox Transport
+ * driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_mailbox - Structure representing a SCMI mailbox transport
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox uni/bi-directional channel
+ * @chan_receiver: Optional Receiver mailbox unidirectional channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ */
+struct scmi_mailbox {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct mbox_chan *chan_receiver;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+};
+
+#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+
+static void tx_prepare(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ shmem_tx_prepare(smbox->shmem, m, smbox->cinfo);
+}
+
+static void rx_callback(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ /*
+ * An A2P IRQ is NOT valid when received while the platform still has
+ * the ownership of the channel, because the platform at first releases
+ * the SMT channel and then sends the completion interrupt.
+ *
+ * This addresses a possible race condition in which a spurious IRQ from
+ * a previous timed-out reply which arrived late could be wrongly
+ * associated with the next pending transaction.
+ */
+ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+ dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+ return;
+ }
+
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
+}
+
+static bool mailbox_chan_available(struct device_node *of_node, int idx)
+{
+ int num_mb;
+
+ /*
+ * Just check if bidirectional channels are involved, and check the
+ * index accordingly; proper full validation will be made later
+ * in mailbox_chan_setup().
+ */
+ num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
+ if (num_mb == 3 && idx == 1)
+ idx = 2;
+
+ return !of_parse_phandle_with_args(of_node, "mboxes",
+ "#mbox-cells", idx, NULL);
+}
+
+/**
+ * mailbox_chan_validate - Validate transport configuration and map channels
+ *
+ * @cdev: Reference to the underlying transport device carrying the
+ * of_node descriptor to analyze.
+ * @a2p_rx_chan: A reference to an optional unidirectional channel to use
+ * for replies on the a2p channel. Set as zero if not present.
+ * @p2a_chan: A reference to the optional p2a channel.
+ * Set as zero if not present.
+ *
+ * At first, validate the transport configuration as described in terms of
+ * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
+ * appropriate to be used in the current configuration.
+ *
+ * Return: 0 on Success or error
+ */
+static int mailbox_chan_validate(struct device *cdev,
+ int *a2p_rx_chan, int *p2a_chan)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);
+
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 ||
+ (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) {
+ dev_warn(cdev,
+ "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
+ of_node_full_name(np), num_mb, num_sh);
+ return -EINVAL;
+ }
+
+ /* Bail out if provided shmem descriptors do not refer to distinct areas */
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ /* Calculate channels IDs to use depending on mboxes/shmem layout */
+ if (!ret) {
+ switch (num_mb) {
+ case 1:
+ *a2p_rx_chan = 0;
+ *p2a_chan = 0;
+ break;
+ case 2:
+ if (num_sh == 2) {
+ *a2p_rx_chan = 0;
+ *p2a_chan = 1;
+ } else {
+ *a2p_rx_chan = 1;
+ *p2a_chan = 0;
+ }
+ break;
+ case 3:
+ *a2p_rx_chan = 1;
+ *p2a_chan = 2;
+ break;
+ }
+ }
+
+ return ret;
+}
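+
+/*
+ * To summarize the mapping computed above, the accepted mboxes/shmem layouts
+ * resolve to the following channel indexes:
+ *
+ *	1 mbox / 1 shmem -> a2p_rx_chan = 0, p2a_chan = 0
+ *	2 mbox / 1 shmem -> a2p_rx_chan = 1, p2a_chan = 0
+ *	2 mbox / 2 shmem -> a2p_rx_chan = 0, p2a_chan = 1
+ *	3 mbox / 2 shmem -> a2p_rx_chan = 1, p2a_chan = 2
+ *
+ * where a zero index means the corresponding optional channel is not present.
+ */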
+
+static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ const char *desc = tx ? "Tx" : "Rx";
+ struct device *cdev = cinfo->dev;
+ struct scmi_mailbox *smbox;
+ struct device_node *shmem;
+ int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1;
+ struct mbox_client *cl;
+ resource_size_t size;
+ struct resource res;
+
+ ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan);
+ if (ret)
+ return ret;
+
+ if (!tx && !p2a_chan)
+ return -ENODEV;
+
+ smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ if (!smbox)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
+ of_node_put(shmem);
+ return -ENXIO;
+ }
+
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+ return ret;
+ }
+
+ size = resource_size(&res);
+ smbox->shmem = devm_ioremap(dev, res.start, size);
+ if (!smbox->shmem) {
+ dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+ return -EADDRNOTAVAIL;
+ }
+
+ cl = &smbox->cl;
+ cl->dev = cdev;
+ cl->tx_prepare = tx ? tx_prepare : NULL;
+ cl->rx_callback = rx_callback;
+ cl->tx_block = false;
+ cl->knows_txdone = tx;
+
+ smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
+ if (IS_ERR(smbox->chan)) {
+ ret = PTR_ERR(smbox->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev,
+ "failed to request SCMI %s mailbox\n", desc);
+ return ret;
+ }
+
+ /* Additional unidirectional channel for TX if needed */
+ if (tx && a2p_rx_chan) {
+ smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
+ if (IS_ERR(smbox->chan_receiver)) {
+ ret = PTR_ERR(smbox->chan_receiver);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
+ return ret;
+ }
+ }
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
+
+ return 0;
+}
+
+static int mailbox_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ if (smbox && !IS_ERR(smbox->chan)) {
+ mbox_free_channel(smbox->chan);
+ mbox_free_channel(smbox->chan_receiver);
+ cinfo->transport_info = NULL;
+ smbox->chan = NULL;
+ smbox->chan_receiver = NULL;
+ smbox->cinfo = NULL;
+ }
+
+ return 0;
+}
+
+static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+ ret = mbox_send_message(smbox->chan, xfer);
+
+ /* mbox_send_message returns non-negative value on success, so reset */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(smbox->chan, ret);
+}
+
+static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_response(smbox->shmem, xfer);
+}
+
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_clear_channel(smbox->shmem);
+}
+
+static bool
+mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ return shmem_poll_done(smbox->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_mailbox_ops = {
+ .chan_available = mailbox_chan_available,
+ .chan_setup = mailbox_chan_setup,
+ .chan_free = mailbox_chan_free,
+ .send_message = mailbox_send_message,
+ .mark_txdone = mailbox_mark_txdone,
+ .fetch_response = mailbox_fetch_response,
+ .fetch_notification = mailbox_fetch_notification,
+ .clear_channel = mailbox_clear_channel,
+ .poll_done = mailbox_poll_done,
+};
+
+const struct scmi_desc scmi_mailbox_desc = {
+ .ops = &scmi_mailbox_ops,
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c
new file mode 100644
index 0000000000..d33a704e58
--- /dev/null
+++ b/drivers/firmware/arm_scmi/msg.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using message passing.
+ *
+ * Derived from shm.c.
+ *
+ * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ */
+
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * struct scmi_msg_payld - Transport SDU layout
+ *
+ * The SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian format only.
+ */
+struct scmi_msg_payld {
+ __le32 msg_header;
+ __le32 msg_payload[];
+};
+
+/**
+ * msg_command_size() - Actual size of transport SDU for command.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_command_size(struct scmi_xfer *xfer)
+{
+ return sizeof(struct scmi_msg_payld) + xfer->tx.len;
+}
+
+/**
+ * msg_response_size() - Maximum size of transport SDU for response.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_response_size(struct scmi_xfer *xfer)
+{
+ return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len;
+}
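+
+/*
+ * As a worked example, with the 4-byte header defined above, a command
+ * carrying an 8-byte TX payload results in a 12-byte SDU, while the
+ * corresponding response SDU is at most 4 (header) + 4 (status) + rx.len
+ * bytes long.
+ */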
+
+/**
+ * msg_tx_prepare() - Set up transport SDU for command.
+ *
+ * @msg: transport SDU for command
+ * @xfer: message which is being sent
+ */
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
+{
+ msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->tx.buf)
+ memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+/**
+ * msg_read_header() - Read SCMI header from transport SDU.
+ *
+ * @msg: transport SDU
+ *
+ * Return: SCMI header
+ */
+u32 msg_read_header(struct scmi_msg_payld *msg)
+{
+ return le32_to_cpu(msg->msg_header);
+}
+
+/**
+ * msg_fetch_response() - Fetch response SCMI payload from transport SDU.
+ *
+ * @msg: transport SDU with response
+ * @len: transport SDU size
+ * @xfer: message being responded to
+ */
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+ struct scmi_xfer *xfer)
+{
+ size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]);
+
+ xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]);
+ xfer->rx.len = min_t(size_t, xfer->rx.len,
+ len >= prefix_len ? len - prefix_len : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len);
+}
+
+/**
+ * msg_fetch_notification() - Fetch notification payload from transport SDU.
+ *
+ * @msg: transport SDU with notification
+ * @len: transport SDU size
+ * @max_len: maximum SCMI payload size to fetch
+ * @xfer: notification message
+ */
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ xfer->rx.len = min_t(size_t, max_len,
+ len >= sizeof(*msg) ? len - sizeof(*msg) : 0);
+
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
+}
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
new file mode 100644
index 0000000000..0efd20cd9d
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -0,0 +1,1712 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Notification support
+ *
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * SCMI Protocol specification allows the platform to signal events to
+ * interested agents via notification messages: this is an implementation
+ * of the dispatch and delivery of such notifications to the interested users
+ * inside the Linux kernel.
+ *
+ * An SCMI Notification core instance is initialized for each active platform
+ * instance identified by means of the usual &struct scmi_handle.
+ *
+ * Each SCMI Protocol implementation, during its initialization, registers with
+ * this core its set of supported events using scmi_register_protocol_events():
+ * all the needed descriptors are stored in the &struct registered_protocols and
+ * &struct registered_events arrays.
+ *
+ * Kernel users interested in some specific event can register their callbacks
+ * providing the usual notifier_block descriptor, since this core implements
+ * events' delivery using the standard Kernel notification chains machinery.
+ *
+ * Given the number of possible events defined by SCMI and the extensibility
+ * of the SCMI Protocol itself, the underlying notification chains are created
+ * and destroyed dynamically on demand depending on the number of users
+ * effectively registered for an event, so that no support structures or chains
+ * are allocated until at least one user has registered a notifier_block for
+ * such event. Similarly, events' generation itself is enabled at the platform
+ * level only after at least one user has registered, and it is shutdown after
+ * the last user for that event has gone.
+ *
+ * All users provided callbacks and allocated notification-chains are stored in
+ * the @registered_events_handlers hashtable. Callbacks' registration requests
+ * for events still to be registered are instead kept in the dedicated common
+ * hashtable @pending_events_handlers.
+ *
+ * An event is uniquely identified by the tuple (proto_id, evt_id, src_id)
+ * and is served by its own dedicated notification chain; information contained
+ * in such tuples is used, in a few different ways, to generate the needed
+ * hash-keys.
+ *
+ * Here proto_id and evt_id are simply the protocol_id and message_id numbers
+ * as described in the SCMI Protocol specification, while src_id represents an
+ * optional, protocol dependent, source identifier (like domain_id, perf_id
+ * or sensor_id and so forth).
+ *
+ * Upon reception of a notification message from the platform the SCMI RX ISR
+ * passes the received message payload and some ancillary information (including
+ * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
+ * pushes the event-data itself on a protocol-dedicated kfifo queue for further
+ * deferred processing as specified in @scmi_events_dispatcher().
+ *
+ * Each protocol has its own dedicated work_struct and worker which, once kicked
+ * by the ISR, takes care of emptying its own dedicated queue, delivering the
+ * queued items into the proper notification-chain: notifications processing can
+ * proceed concurrently on distinct workers only between events belonging to
+ * different protocols while delivery of events within the same protocol is
+ * still strictly sequentially ordered by time of arrival.
+ *
+ * Events' information is then extracted from the SCMI Notification messages and
+ * conveyed, converted into a custom per-event report struct, as the void *data
+ * param to the user callback provided by the registered notifier_block, so that
+ * from the user's perspective the callback is invoked as:
+ *
+ * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
+ *
+ */
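+
+/*
+ * A minimal illustrative sketch of such a user callback follows; names are
+ * hypothetical and the actual report type depends on the event subscribed to
+ * (a sensor trip point report is assumed here):
+ *
+ *	static int my_trip_point_cb(struct notifier_block *nb,
+ *				    unsigned long event_id, void *data)
+ *	{
+ *		struct scmi_sensor_trip_point_report *r = data;
+ *
+ *		pr_info("Sensor %u tripped\n", r->sensor_id);
+ *
+ *		return NOTIFY_OK;
+ *	}
+ *
+ * The notifier_block embedding such a callback is then registered and
+ * unregistered through the notification operations exposed on the
+ * scmi_handle of the interested SCMI device.
+ */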
+
+#define dev_fmt(fmt) "SCMI Notifications - " fmt
+#define pr_fmt(fmt) "SCMI Notifications - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_MAX_PROTO 256
+
+#define PROTO_ID_MASK GENMASK(31, 24)
+#define EVT_ID_MASK GENMASK(23, 16)
+#define SRC_ID_MASK GENMASK(15, 0)
+
+/*
+ * Builds an unsigned 32bit key from the given input tuple to be used
+ * as a key in hashtables.
+ */
+#define MAKE_HASH_KEY(p, e, s) \
+ (FIELD_PREP(PROTO_ID_MASK, (p)) | \
+ FIELD_PREP(EVT_ID_MASK, (e)) | \
+ FIELD_PREP(SRC_ID_MASK, (s)))
+
+#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
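+
+/*
+ * For example, the tuple (proto_id = 0x13, evt_id = 0x0, src_id = 2) maps to
+ * the key 0x13000002, while MAKE_ALL_SRCS_KEY(0x13, 0x0) yields 0x1300FFFF.
+ */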
+
+/*
+ * Assumes that the stored obj includes its own hash-key in a field named 'key':
+ * with this simplification this macro can be equally used for all the objects'
+ * types hashed by this implementation.
+ *
+ * @__ht: The hashtable name
+ * @__obj: A pointer to the object type to be retrieved from the hashtable;
+ * it will be used as a cursor while scanning the hastable and it will
+ * be possibly left as NULL when @__k is not found
+ * @__k: The key to search for
+ */
+#define KEY_FIND(__ht, __obj, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ typeof(__obj) obj_; \
+ \
+ hash_for_each_possible((__ht), obj_, hash, k_) \
+ if (obj_->key == k_) \
+ break; \
+ __obj = obj_; \
+})
+
+#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key))
+#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key))
+#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key))
+
+/*
+ * A set of macros used to access safely @registered_protocols and
+ * @registered_events arrays; these are fixed in size and each entry is possibly
+ * populated at protocols' registration time and then only read but NEVER
+ * modified or removed.
+ */
+#define SCMI_GET_PROTO(__ni, __pid) \
+({ \
+ typeof(__ni) ni_ = __ni; \
+ struct scmi_registered_events_desc *__pd = NULL; \
+ \
+ if (ni_) \
+ __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
+ __pd; \
+})
+
+#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \
+({ \
+ typeof(__pd) pd_ = __pd; \
+ typeof(__eid) eid_ = __eid; \
+ struct scmi_registered_event *__revt = NULL; \
+ \
+ if (pd_ && eid_ < pd_->num_events) \
+ __revt = READ_ONCE(pd_->registered_events[eid_]); \
+ __revt; \
+})
+
+#define SCMI_GET_REVT(__ni, __pid, __eid) \
+({ \
+ struct scmi_registered_event *__revt; \
+ struct scmi_registered_events_desc *__pd; \
+ \
+ __pd = SCMI_GET_PROTO((__ni), (__pid)); \
+ __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \
+ __revt; \
+})
+
+/* A couple of utility macros to limit cruft when calling protocols' helpers */
+#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->set_notify_enabled(r->proto->ph, \
+ (eid), (sid), (state)); \
+})
+
+#define REVT_NOTIFY_ENABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
+
+#define REVT_NOTIFY_DISABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
+
+#define REVT_FILL_REPORT(revt, ...) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->fill_custom_report(r->proto->ph, \
+ __VA_ARGS__); \
+})
+
+#define SCMI_PENDING_HASH_SZ 4
+#define SCMI_REGISTERED_HASH_SZ 6
+
+struct scmi_registered_events_desc;
+
+/**
+ * struct scmi_notify_instance - Represents an instance of the notification
+ * core
+ * @gid: GroupID used for devres
+ * @handle: A reference to the platform instance
+ * @init_work: A work item to perform final initializations of pending handlers
+ * @notify_wq: A reference to the allocated Kernel cmwq
+ * @pending_mtx: A mutex to protect @pending_events_handlers
+ * @registered_protocols: A statically allocated array containing pointers to
+ * all the registered protocol-level specific information
+ * related to events' handling
+ * @pending_events_handlers: A hashtable containing all pending events'
+ * handlers descriptors
+ *
+ * Each platform instance, represented by a handle, has its own instance of
+ * the notification subsystem represented by this structure.
+ */
+struct scmi_notify_instance {
+ void *gid;
+ struct scmi_handle *handle;
+ struct work_struct init_work;
+ struct workqueue_struct *notify_wq;
+ /* lock to protect pending_events_handlers */
+ struct mutex pending_mtx;
+ struct scmi_registered_events_desc **registered_protocols;
+ DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
+};
+
+/**
+ * struct events_queue - Describes a queue and its associated worker
+ * @sz: Size in bytes of the related kfifo
+ * @kfifo: A dedicated Kernel kfifo descriptor
+ * @notify_work: A custom work item bound to this queue
+ * @wq: A reference to the associated workqueue
+ *
+ * Each protocol has its own dedicated events_queue descriptor.
+ */
+struct events_queue {
+ size_t sz;
+ struct kfifo kfifo;
+ struct work_struct notify_work;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct scmi_event_header - A utility header
+ * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
+ * to this event as soon as it entered the SCMI RX ISR
+ * @payld_sz: Effective size of the embedded message payload which follows
+ * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
+ * @payld: A reference to the embedded event payload
+ *
+ * This header is prepended to each received event message payload before
+ * queueing it on the related &struct events_queue.
+ */
+struct scmi_event_header {
+ ktime_t timestamp;
+ size_t payld_sz;
+ unsigned char evt_id;
+ unsigned char payld[];
+};
+
+struct scmi_registered_event;
+
+/**
+ * struct scmi_registered_events_desc - Protocol Specific information
+ * @id: Protocol ID
+ * @ops: Protocol specific and event-related operations
+ * @equeue: The embedded per-protocol events_queue
+ * @ni: A reference to the initialized instance descriptor
+ * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
+ * deferred worker when fetching data from the kfifo
+ * @eh_sz: Size of the pre-allocated buffer @eh
+ * @in_flight: A reference to an in flight &struct scmi_registered_event
+ * @num_events: Number of events in @registered_events
+ * @registered_events: A dynamically allocated array holding all the registered
+ * events' descriptors, whose fixed-size is determined at
+ * compile time.
+ * @registered_mtx: A mutex to protect @registered_events_handlers
+ * @ph: SCMI protocol handle reference
+ * @registered_events_handlers: A hashtable containing all events' handlers
+ * descriptors registered for this protocol
+ *
+ * All protocols that register at least one event have their protocol-specific
+ * information stored here, together with the embedded allocated events_queue.
+ * These descriptors are stored in the @registered_protocols array at protocol
+ * registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that, once
+ * we safely grab a NON-NULL reference from the array we can keep it and use it.
+ */
+struct scmi_registered_events_desc {
+ u8 id;
+ const struct scmi_event_ops *ops;
+ struct events_queue equeue;
+ struct scmi_notify_instance *ni;
+ struct scmi_event_header *eh;
+ size_t eh_sz;
+ void *in_flight;
+ int num_events;
+ struct scmi_registered_event **registered_events;
+ /* mutex to protect registered_events_handlers */
+ struct mutex registered_mtx;
+ const struct scmi_protocol_handle *ph;
+ DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
+};
+
+/**
+ * struct scmi_registered_event - Event Specific Information
+ * @proto: A reference to the associated protocol descriptor
+ * @evt: A reference to the associated event descriptor (as provided at
+ * registration time)
+ * @report: A pre-allocated buffer used by the deferred worker to fill a
+ * customized event report
+ * @num_sources: The number of possible sources for this event as stated at
+ * events' registration time
+ * @sources: A reference to a dynamically allocated array used to refcount the
+ * events' enable requests for all the existing sources
+ * @sources_mtx: A mutex to serialize the access to @sources
+ *
+ * All registered events are represented by one of these structures that are
+ * stored in the @registered_events array at protocol registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that once we
+ * safely grab a NON-NULL reference from the table we can keep it and use it.
+ */
+struct scmi_registered_event {
+ struct scmi_registered_events_desc *proto;
+ const struct scmi_event *evt;
+ void *report;
+ u32 num_sources;
+ refcount_t *sources;
+ /* locking to serialize the access to sources */
+ struct mutex sources_mtx;
+};
+
+/**
+ * struct scmi_event_handler - Event handler information
+ * @key: The used hashkey
+ * @users: A reference count for number of active users for this handler
+ * @r_evt: A reference to the associated registered event; when this is NULL
+ *	   this handler is pending, which means it identifies a set of
+ *	   callbacks intended to be attached to an event which is not yet
+ *	   known nor registered by any protocol at that point in time
+ * @chain: The notification chain dedicated to this specific event tuple
+ * @hash: The hlist_node used for collision handling
+ * @enabled: A boolean which records if event's generation has been already
+ * enabled for this handler as a whole
+ *
+ * This structure collects all the information needed to process a received
+ * event identified by the tuple (proto_id, evt_id, src_id).
+ * These descriptors are stored in a per-protocol @registered_events_handlers
+ * table using as a key a value derived from that tuple.
+ */
+struct scmi_event_handler {
+ u32 key;
+ refcount_t users;
+ struct scmi_registered_event *r_evt;
+ struct blocking_notifier_head chain;
+ struct hlist_node hash;
+ bool enabled;
+};
+
+#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
+
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+
+/**
+ * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The key to use to lookup the related notification chain
+ * @report: The customized event-specific report to pass down to the callbacks
+ * as their *data parameter.
+ */
+static inline void
+scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
+ u32 evt_key, void *report)
+{
+ int ret;
+ struct scmi_event_handler *hndl;
+
+ /*
+ * Here ensure the event handler cannot vanish while using it.
+	 * It is legitimate, though, for a handler not to be found at all here,
+ * e.g. when it has been unregistered by the user after some events had
+ * already been queued.
+ */
+ hndl = scmi_get_active_handler(ni, evt_key);
+ if (!hndl)
+ return;
+
+ ret = blocking_notifier_call_chain(&hndl->chain,
+ KEY_XTRACT_EVT_ID(evt_key),
+ report);
+ /* Notifiers are NOT supposed to cut the chain ... */
+ WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
+
+ scmi_put_active_handler(ni, hndl);
+}
+
+/**
+ * scmi_process_event_header() - Dequeue and process an event header
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ *
+ * Read an event header from the protocol queue into the dedicated scratch
+ * buffer and look for a matching registered event; if an anomalously sized
+ * read is detected, just flush the queue.
+ *
+ * Return:
+ * * a reference to the matching registered event when found
+ * * ERR_PTR(-EINVAL) when NO registered event could be found
+ * * NULL when the queue is empty
+ */
+static inline struct scmi_registered_event *
+scmi_process_event_header(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd)
+{
+ unsigned int outs;
+ struct scmi_registered_event *r_evt;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh,
+ sizeof(struct scmi_event_header));
+ if (!outs)
+ return NULL;
+ if (outs != sizeof(struct scmi_event_header)) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return NULL;
+ }
+
+ r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
+ if (!r_evt)
+ r_evt = ERR_PTR(-EINVAL);
+
+ return r_evt;
+}
+
+/**
+ * scmi_process_event_payload() - Dequeue and process an event payload
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ * @r_evt: The registered event descriptor to use
+ *
+ * Read an event payload from the protocol queue into the dedicated scratch
+ * buffer, fill a custom report and then look for matching event handlers and
+ * call them; skip any unknown event (as marked by scmi_process_event_header())
+ * and, if an anomalously sized read is detected, just flush the queue.
+ *
+ * Return: False when the queue is empty
+ */
+static inline bool
+scmi_process_event_payload(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd,
+ struct scmi_registered_event *r_evt)
+{
+ u32 src_id, key;
+ unsigned int outs;
+ void *report = NULL;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
+ if (!outs)
+ return false;
+
+ /* Any in-flight event has now been officially processed */
+ pd->in_flight = NULL;
+
+ if (outs != pd->eh->payld_sz) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return false;
+ }
+
+ if (IS_ERR(r_evt)) {
+ dev_warn(pd->ni->handle->dev,
+ "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
+ pd->eh->payld, pd->eh->payld_sz,
+ r_evt->report, &src_id);
+ if (!report) {
+ dev_err(pd->ni->handle->dev,
+ "report not available - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ /* At first search for a generic ALL src_ids handler... */
+ key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ /* ...then search for any specific src_id */
+ key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ return true;
+}
+
+/**
+ * scmi_events_dispatcher() - Common worker logic for all work items.
+ * @work: The work item to use, which is associated to a dedicated events_queue
+ *
+ * Logic:
+ * 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
+ * 2. generate a custom event report from the received event message
+ * 3. look up any registered ALL_SRC_IDs handler:
+ *    -> call the related notification chain passing in the report
+ * 4. look up any registered specific SRC_ID handler:
+ *    -> call the related notification chain passing in the report
+ *
+ * Note that:
+ * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
+ * flood of events cannot saturate other protocols' queues.
+ * * each per-protocol queue is associated to a distinct work_item, which
+ * means, in turn, that:
+ * + all protocols can process their dedicated queues concurrently
+ * (since notify_wq:max_active != 1)
+ * + anyway at most one worker instance is allowed to run on the same queue
+ * concurrently: this ensures that we can have only one concurrent
+ * reader/writer on the associated kfifo, so that we can use it lock-less
+ *
+ * Context: Process context.
+ */
+static void scmi_events_dispatcher(struct work_struct *work)
+{
+ struct events_queue *eq;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_registered_event *r_evt;
+
+ eq = container_of(work, struct events_queue, notify_work);
+ pd = container_of(eq, struct scmi_registered_events_desc, equeue);
+ /*
+ * In order to keep the queue lock-less and the number of memcopies
+ * to the bare minimum needed, the dispatcher accounts for the
+ * possibility of per-protocol in-flight events: i.e. an event whose
+ * reception could end up being split across two subsequent runs of this
+ * worker, first the header, then the payload.
+ */
+ do {
+ if (!pd->in_flight) {
+ r_evt = scmi_process_event_header(eq, pd);
+ if (!r_evt)
+ break;
+ pd->in_flight = r_evt;
+ } else {
+ r_evt = pd->in_flight;
+ }
+ } while (scmi_process_event_payload(eq, pd, r_evt));
+}
+
+/**
+ * scmi_notify() - Queues a notification for further deferred processing
+ * @handle: The handle identifying the platform instance from which the
+ * dispatched event is generated
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID (msgID)
+ * @buf: Event Message Payload (without the header)
+ * @len: Event Message Payload size
+ * @ts: RX Timestamp in nanoseconds (boottime)
+ *
+ * Context: Called in interrupt context to queue a received event for
+ * deferred processing.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_header eh;
+ struct scmi_notify_instance *ni;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return 0;
+
+ r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
+ if (!r_evt)
+ return -EINVAL;
+
+ if (len > r_evt->evt->max_payld_sz) {
+ dev_err(handle->dev, "discard badly sized message\n");
+ return -EINVAL;
+ }
+ if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
+ dev_warn(handle->dev,
+ "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
+ proto_id, evt_id, ktime_to_ns(ts));
+ return -ENOMEM;
+ }
+
+ eh.timestamp = ts;
+ eh.evt_id = evt_id;
+ eh.payld_sz = len;
+ /*
+ * Header and payload are enqueued with two distinct kfifo_in() (so non
+ * atomic), but this situation is handled properly on the consumer side
+ * with in-flight events tracking.
+ */
+ kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
+ kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
+	/*
+	 * The return value is ignored here since we only need to ensure that
+	 * a work item is queued every time some items have been pushed onto
+	 * the kfifo:
+	 * - if the work was already queued, the new request simply fails,
+	 *   which is fine since a run is already pending
+	 * - if the work was not queued, it is queued now, even if the worker
+	 *   happens to be running: this behavior avoids any possible race when
+	 *   this function pushes new items onto the kfifo right after the
+	 *   running worker has already determined the kfifo to be empty and
+	 *   is about to terminate.
+	 */
+ queue_work(r_evt->proto->equeue.wq,
+ &r_evt->proto->equeue.notify_work);
+
+ return 0;
+}
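+
+/*
+ * A minimal usage sketch of the above (illustrative only, the surrounding
+ * identifiers are hypothetical): a transport RX path that has already decoded
+ * the message header would forward a notification roughly as follows:
+ *
+ *	ktime_t ts = ktime_get_boottime();
+ *
+ *	scmi_notify(handle, proto_id, evt_id, payld, payld_len, ts);
+ *
+ * The call only enqueues the event on the per-protocol kfifo and kicks the
+ * dedicated worker; report generation and notifier invocation happen later
+ * in process context.
+ */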
+
+/**
+ * scmi_kfifo_free() - Devres action helper to free the kfifo
+ * @kfifo: The kfifo to free
+ */
+static void scmi_kfifo_free(void *kfifo)
+{
+ kfifo_free((struct kfifo *)kfifo);
+}
+
+/**
+ * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
+ * @ni: A reference to the notification instance to use
+ * @equeue: The events_queue to initialize
+ * @sz: Size of the kfifo buffer to allocate
+ *
+ * Allocate a buffer for the kfifo and initialize it.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
+ struct events_queue *equeue, size_t sz)
+{
+ int ret;
+
+ if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
+ return -ENOMEM;
+	/* Size could have been rounded up to a power of two */
+ equeue->sz = kfifo_size(&equeue->kfifo);
+
+ ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
+ &equeue->kfifo);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
+ equeue->wq = ni->notify_wq;
+
+ return ret;
+}
+
+/**
+ * scmi_allocate_registered_events_desc() - Allocate a registered events'
+ * descriptor
+ * @ni: A reference to the &struct scmi_notify_instance notification instance
+ * to use
+ * @proto_id: Protocol ID
+ * @queue_sz: Size of the associated queue to allocate
+ * @eh_sz: Size of the event header scratch area to pre-allocate
+ * @num_events: Number of events to support (size of @registered_events)
+ * @ops: Pointer to a struct holding references to protocol specific helpers
+ * needed during events handling
+ *
+ * It is supposed to be called only once for each protocol at protocol
+ * initialization time, so it warns if the requested protocol is found already
+ * registered.
+ *
+ * Return: The allocated and registered descriptor on Success
+ */
+static struct scmi_registered_events_desc *
+scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
+ u8 proto_id, size_t queue_sz, size_t eh_sz,
+ int num_events,
+ const struct scmi_event_ops *ops)
+{
+ int ret;
+ struct scmi_registered_events_desc *pd;
+
+ /* Ensure protocols are up to date */
+ smp_rmb();
+ if (WARN_ON(ni->registered_protocols[proto_id]))
+ return ERR_PTR(-EINVAL);
+
+ pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+ pd->id = proto_id;
+ pd->ops = ops;
+ pd->ni = ni;
+
+ ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
+ if (!pd->eh)
+ return ERR_PTR(-ENOMEM);
+ pd->eh_sz = eh_sz;
+
+ pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
+ sizeof(char *), GFP_KERNEL);
+ if (!pd->registered_events)
+ return ERR_PTR(-ENOMEM);
+ pd->num_events = num_events;
+
+ /* Initialize per protocol handlers table */
+ mutex_init(&pd->registered_mtx);
+ hash_init(pd->registered_events_handlers);
+
+ return pd;
+}
+
+/**
+ * scmi_register_protocol_events() - Register Protocol Events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ * @ph: SCMI protocol handle.
+ * @ee: A structure describing the events supported by this protocol.
+ *
+ * Used by SCMI Protocols initialization code to register with the notification
+ * core the list of supported events and their descriptors: takes care to
+ * pre-allocate and store all needed descriptors, scratch buffers and event
+ * queues.
+ *
+ * Return: 0 on Success
+ */
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee)
+{
+ int i;
+ unsigned int num_sources;
+ size_t payld_sz = 0;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_notify_instance *ni;
+ const struct scmi_event *evt;
+
+ if (!ee || !ee->ops || !ee->evts || !ph ||
+ (!ee->num_sources && !ee->ops->get_num_sources))
+ return -EINVAL;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return -ENOMEM;
+
+ /* num_sources cannot be <= 0 */
+ if (ee->num_sources) {
+ num_sources = ee->num_sources;
+ } else {
+ int nsrc = ee->ops->get_num_sources(ph);
+
+ if (nsrc <= 0)
+ return -EINVAL;
+ num_sources = nsrc;
+ }
+
+ evt = ee->evts;
+ for (i = 0; i < ee->num_events; i++)
+ payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
+ payld_sz += sizeof(struct scmi_event_header);
+
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
+ payld_sz, ee->num_events,
+ ee->ops);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ pd->ph = ph;
+ for (i = 0; i < ee->num_events; i++, evt++) {
+ struct scmi_registered_event *r_evt;
+
+ r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
+ GFP_KERNEL);
+ if (!r_evt)
+ return -ENOMEM;
+ r_evt->proto = pd;
+ r_evt->evt = evt;
+
+ r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
+ sizeof(refcount_t), GFP_KERNEL);
+ if (!r_evt->sources)
+ return -ENOMEM;
+ r_evt->num_sources = num_sources;
+ mutex_init(&r_evt->sources_mtx);
+
+ r_evt->report = devm_kzalloc(ni->handle->dev,
+ evt->max_report_sz, GFP_KERNEL);
+ if (!r_evt->report)
+ return -ENOMEM;
+
+ pd->registered_events[i] = r_evt;
+ /* Ensure events are updated */
+ smp_wmb();
+ dev_dbg(handle->dev, "registered event - %lX\n",
+ MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
+ }
+
+ /* Register protocol and events...it will never be removed */
+ ni->registered_protocols[proto_id] = pd;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ /*
+ * Finalize any pending events' handler which could have been waiting
+ * for this protocol's events registration.
+ */
+ schedule_work(&ni->init_work);
+
+ return 0;
+}
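+
+/*
+ * A minimal registration sketch (illustrative only, all the foo_* identifiers
+ * are hypothetical): a protocol would typically describe its events once, at
+ * initialization time, and let the core pick them up through
+ * scmi_register_protocol_events():
+ *
+ *	static const struct scmi_event foo_events[] = {
+ *		{
+ *			.id = 0x0,
+ *			.max_payld_sz = sizeof(struct scmi_foo_notify_payld),
+ *			.max_report_sz = sizeof(struct scmi_foo_report),
+ *		},
+ *	};
+ *
+ *	static const struct scmi_protocol_events foo_protocol_events = {
+ *		.queue_sz = SCMI_PROTO_QUEUE_SZ,
+ *		.ops = &foo_event_ops,
+ *		.evts = foo_events,
+ *		.num_events = ARRAY_SIZE(foo_events),
+ *		.num_sources = FOO_MAX_NUM_DOMAINS,
+ *	};
+ */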
+
+/**
+ * scmi_deregister_protocol_events - Deregister protocol events with the core
+ * @handle: The handle identifying the platform instance against which the
+ * protocol's events are registered
+ * @proto_id: Protocol ID
+ */
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id)
+{
+ struct scmi_notify_instance *ni;
+ struct scmi_registered_events_desc *pd;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return;
+
+ pd = ni->registered_protocols[proto_id];
+ if (!pd)
+ return;
+
+ ni->registered_protocols[proto_id] = NULL;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ cancel_work_sync(&pd->equeue.notify_work);
+}
+
+/**
+ * scmi_allocate_event_handler() - Allocate Event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: 32bit key uniquely bound to the event identified by the tuple
+ * (proto_id, evt_id, src_id)
+ *
+ * Allocate an event handler and related notification chain associated with
+ * the provided event handler key.
+ * Note that, at this point, a related registered_event is still to be
+ * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
+ * is initialized as pending.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ * Return: the freshly allocated structure on Success
+ */
+static struct scmi_event_handler *
+scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_event_handler *hndl;
+
+ hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
+ if (!hndl)
+ return NULL;
+ hndl->key = evt_key;
+ BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
+ refcount_set(&hndl->users, 1);
+ /* New handlers are created pending */
+ hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
+
+ return hndl;
+}
+
+/**
+ * scmi_free_event_handler() - Free the provided Event handler
+ * @hndl: The event handler structure to free
+ *
+ * Context: Assumes to be called with proper locking acquired depending
+ * on the situation.
+ */
+static void scmi_free_event_handler(struct scmi_event_handler *hndl)
+{
+ hash_del(&hndl->hash);
+ kfree(hndl);
+}
+
+/**
+ * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to bind
+ *
+ * If an associated registered event is found, move the handler from the pending
+ * into the registered table.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
+ KEY_XTRACT_EVT_ID(hndl->key));
+ if (!r_evt)
+ return -EINVAL;
+
+ /*
+ * Remove from pending and insert into registered while getting hold
+ * of protocol instance.
+ */
+ hash_del(&hndl->hash);
+	/*
+	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
+	 * protocol initialization when a notifier is registered against a not
+	 * yet registered protocol, since it would make little sense to force the
+	 * initialization of protocols for which no SCMI driver user exists yet:
+	 * they would not emit any event anyway until some SCMI driver starts
+	 * using them.
+	 */
+ scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
+ hndl->r_evt = r_evt;
+
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hash_add(r_evt->proto->registered_events_handlers,
+ &hndl->hash, hndl->key);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+
+ return 0;
+}
+
+/**
+ * scmi_valid_pending_handler() - Helper to check pending status of handlers
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to check
+ *
+ * A handler is considered pending when its r_evt == NULL, because the related
+ * event was still unknown at handler's registration time; however, since all
+ * protocols register their supported events once and for all at protocol
+ * initialization time, a pending handler cannot be considered valid anymore if
+ * the underlying event it is waiting for belongs to an already initialized and
+ * registered protocol.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_events_desc *pd;
+
+ if (!IS_HNDL_PENDING(hndl))
+ return -EINVAL;
+
+ pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
+ if (pd)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * scmi_register_event_handler() - Register whenever possible an Event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to register
+ *
+ * First try to bind the event handler to its associated event, then check if
+ * it is at least a valid pending handler: if it could neither be bound nor
+ * considered a valid pending handler, return an error.
+ *
+ * Valid pending incomplete bindings will be periodically retried by a dedicated
+ * worker which is kicked each time a new protocol completes its own
+ * registration phase.
+ *
+ * Context: Assumes to be called with @pending_mtx acquired.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
+ hndl->key);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (!ret)
+ dev_dbg(ni->handle->dev,
+ "registered PENDING handler - key:%X\n",
+ hndl->key);
+ }
+
+ return ret;
+}
+
+/**
+ * __scmi_event_handler_get_ops() - Utility to get or create an event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ * @create: A boolean flag to specify if a handler must be created when
+ * not already existent
+ *
+ * Search for the desired handler matching the key in both the per-protocol
+ * registered table and the common pending table:
+ * * if found, adjust the users refcount
+ * * if not found and @create is true, create and register the new handler:
+ *   the handler could end up being registered as pending if no matching event
+ *   could be found.
+ *
+ * A handler is guaranteed to reside in one and only one of the tables at
+ * any one time; to ensure this, the whole search and create is performed
+ * while holding the @pending_mtx lock, with @registered_mtx additionally
+ * acquired if needed.
+ *
+ * Note that when a nested acquisition of these mutexes is needed the locking
+ * order is always (same as in @init_work):
+ * 1. pending_mtx
+ * 2. registered_mtx
+ *
+ * Events generation is NOT enabled right after creation within this routine
+ * since at creation time we usually want to have everything set up and ready
+ * before events really start flowing.
+ *
+ * Return: A properly refcounted handler on Success, NULL on Failure
+ */
+static inline struct scmi_event_handler *
+__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
+ u32 evt_key, bool create)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+
+ mutex_lock(&ni->pending_mtx);
+ /* Search registered events at first ... if possible at all */
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ /* ...then amongst pending. */
+ if (!hndl) {
+ hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ }
+
+ /* Create if still not found and required */
+ if (!hndl && create) {
+ hndl = scmi_allocate_event_handler(ni, evt_key);
+ if (hndl && scmi_register_event_handler(ni, hndl)) {
+ dev_dbg(ni->handle->dev,
+ "purging UNKNOWN handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ hndl = NULL;
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+
+ return hndl;
+}
+
+static struct scmi_event_handler *
+scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, false);
+}
+
+static struct scmi_event_handler *
+scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, true);
+}
+
+/**
+ * scmi_get_active_handler() - Helper to get active handlers only
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ *
+ * Search for the desired handler matching the key only in the per-protocol
+ * table of registered handlers: this is called only from the dispatching path,
+ * so we want to be as quick as possible and do not care about pending handlers.
+ *
+ * Return: A properly refcounted active handler
+ */
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ return hndl;
+}
+
+/**
+ * __scmi_enable_evt() - Enable/disable events generation
+ * @r_evt: The registered event to act upon
+ * @src_id: The src_id to act upon
+ * @enable: The action to perform: true->Enable, false->Disable
+ *
+ * Takes care of proper refcounting while performing enable/disable: handles
+ * the special case of ALL sources requests by itself.
+ * Returns successfully if at least one of the required src_ids has been
+ * successfully enabled/disabled.
+ *
+ * Return: 0 on Success
+ */
+static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
+ u32 src_id, bool enable)
+{
+ int retvals = 0;
+ u32 num_sources;
+ refcount_t *sid;
+
+ if (src_id == SRC_ID_MASK) {
+ src_id = 0;
+ num_sources = r_evt->num_sources;
+ } else if (src_id < r_evt->num_sources) {
+ num_sources = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&r_evt->sources_mtx);
+ if (enable) {
+ for (; num_sources; src_id++, num_sources--) {
+ int ret = 0;
+
+ sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == 0) {
+ ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
+ src_id);
+ if (!ret)
+ refcount_set(sid, 1);
+ } else {
+ refcount_inc(sid);
+ }
+ retvals += !ret;
+ }
+ } else {
+ for (; num_sources; src_id++, num_sources--) {
+ sid = &r_evt->sources[src_id];
+ if (refcount_dec_and_test(sid))
+ REVT_NOTIFY_DISABLE(r_evt,
+ r_evt->evt->id, src_id);
+ }
+ retvals = 1;
+ }
+ mutex_unlock(&r_evt->sources_mtx);
+
+ return retvals ? 0 : -EINVAL;
+}
+
+static int scmi_enable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (!hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), true);
+ if (!ret)
+ hndl->enabled = true;
+ }
+
+ return ret;
+}
+
+static int scmi_disable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), false);
+ if (!ret)
+ hndl->enabled = false;
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_put_handler_unlocked() - Put an event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to act upon
+ *
+ * After having got exclusive access to the registered handlers hashtable,
+ * update the refcount and, if @hndl is no longer in use by anyone:
+ * * ask for events' generation disabling
+ * * unregister and free the handler itself
+ *
+ * Context: Assumes all the proper locking has been managed by the caller.
+ *
+ * Return: True if handler was freed (users dropped to zero)
+ */
+static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ bool freed = false;
+
+ if (refcount_dec_and_test(&hndl->users)) {
+ if (!IS_HNDL_PENDING(hndl))
+ scmi_disable_events(hndl);
+ scmi_free_event_handler(hndl);
+ freed = true;
+ }
+
+ return freed;
+}
+
+static void scmi_put_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ bool freed;
+ u8 protocol_id;
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&ni->pending_mtx);
+ if (r_evt) {
+ protocol_id = r_evt->proto->id;
+ mutex_lock(&r_evt->proto->registered_mtx);
+ }
+
+ freed = scmi_put_handler_unlocked(ni, hndl);
+
+ if (r_evt) {
+ mutex_unlock(&r_evt->proto->registered_mtx);
+		/*
+		 * Only registered handlers acquired the protocol; it must be
+		 * released here only AFTER unlocking registered_mtx, since
+		 * releasing a protocol can trigger its de-initialization
+		 * (i.e. including r_evt and registered_mtx themselves).
+		 */
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
+ }
+ mutex_unlock(&ni->pending_mtx);
+}
+
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ bool freed;
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+ u8 protocol_id = r_evt->proto->id;
+
+ mutex_lock(&r_evt->proto->registered_mtx);
+ freed = scmi_put_handler_unlocked(ni, hndl);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ if (freed)
+ scmi_protocol_release(ni->handle, protocol_id);
+}
+
+/**
+ * scmi_event_handler_enable_events() - Enable events associated with a handler
+ * @hndl: The Event handler to act upon
+ *
+ * Return: 0 on Success
+ */
+static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
+{
+ if (scmi_enable_events(hndl)) {
+ pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_notifier_register() - Register a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is registered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic helper to register a notifier_block against a protocol event.
+ *
+ * A notifier_block @nb will be registered for each distinct event identified
+ * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
+ * so that:
+ *
+ * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
+ *
+ * @src_id meaning is protocol specific and identifies the origin of the event
+ * (like domain_id, sensor_id and so forth).
+ *
+ * @src_id can be NULL to signify that the caller is interested in receiving
+ * notifications from ALL the available sources for that protocol OR simply that
+ * the protocol does not support distinct sources.
+ *
+ * As soon as one user for the specified tuple appears, a handler is created,
+ * and that specific event's generation is enabled at the platform level, unless
+ * the associated registered event cannot be found yet, meaning that the needed
+ * protocol is still to be initialized and the handler has just been registered
+ * as pending.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_notifier_register(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret = 0;
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return -ENODEV;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_or_create_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ blocking_notifier_chain_register(&hndl->chain, nb);
+
+ /* Enable events for not pending handlers */
+ if (!IS_HNDL_PENDING(hndl)) {
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret)
+ scmi_put_handler(ni, hndl);
+ }
+
+ return ret;
+}
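+
+/*
+ * A minimal caller-side sketch (illustrative only, the foo_* identifiers and
+ * the chosen protocol/event/source IDs are hypothetical): an SCMI driver
+ * interested in a specific event for source 3 would do something like:
+ *
+ *	static int foo_notify_cb(struct notifier_block *nb,
+ *				 unsigned long evt_id, void *report)
+ *	{
+ *		... consume the protocol-specific report ...
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = { .notifier_call = foo_notify_cb };
+ *	u32 src_id = 3;
+ *
+ *	handle->notify_ops->event_notifier_register(handle, proto_id, evt_id,
+ *						    &src_id, &foo_nb);
+ */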
+
+/**
+ * scmi_notifier_unregister() - Unregister a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is unregistered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID
+ * @nb: The notifier_block to unregister
+ *
+ * Takes care to unregister the provided @nb from the notification chain
+ * associated with the specified event and, if there are no more users for the
+ * event handler, also frees the associated event handler structures
+ * (this could possibly cause the event's generation to be disabled at the
+ * platform level).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_notifier_unregister(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, const u32 *src_id,
+ struct notifier_block *nb)
+{
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return -ENODEV;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ /*
+ * Note that this chain unregistration call is safe on its own
+ * being internally protected by an rwsem.
+ */
+ blocking_notifier_chain_unregister(&hndl->chain, nb);
+ scmi_put_handler(ni, hndl);
+
+ /*
+ * This balances the initial get issued in @scmi_notifier_register.
+ * If this notifier_block happened to be the last known user callback
+ * for this event, the handler is here freed and the event's generation
+ * stopped.
+ *
+	 * Note that an ongoing concurrent lookup on the delivery workqueue
+	 * path could still hold the refcount at 1 even after this routine
+	 * completes: in such a case it will be the final put on the delivery
+	 * path which will finally free this unused handler.
+ */
+ scmi_put_handler(ni, hndl);
+
+ return 0;
+}
+
+struct scmi_notifier_devres {
+ const struct scmi_handle *handle;
+ u8 proto_id;
+ u8 evt_id;
+ u32 __src_id;
+ u32 *src_id;
+ struct notifier_block *nb;
+};
+
+static void scmi_devm_release_notifier(struct device *dev, void *res)
+{
+ struct scmi_notifier_devres *dres = res;
+
+ scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
+ dres->src_id, dres->nb);
+}
+
+/**
+ * scmi_devm_notifier_register() - Managed registration of a notifier_block
+ * for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to register a notifier_block against a
+ * protocol event.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_devm_notifier_register(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres *dres;
+
+ dres = devres_alloc(scmi_devm_release_notifier,
+ sizeof(*dres), GFP_KERNEL);
+ if (!dres)
+ return -ENOMEM;
+
+ ret = scmi_notifier_register(sdev->handle, proto_id,
+ evt_id, src_id, nb);
+ if (ret) {
+ devres_free(dres);
+ return ret;
+ }
+
+ dres->handle = sdev->handle;
+ dres->proto_id = proto_id;
+ dres->evt_id = evt_id;
+ dres->nb = nb;
+ if (src_id) {
+ dres->__src_id = *src_id;
+ dres->src_id = &dres->__src_id;
+ } else {
+ dres->src_id = NULL;
+ }
+ devres_add(&sdev->dev, dres);
+
+ return ret;
+}
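+
+/*
+ * Illustrative sketch (hypothetical identifiers): from an scmi_driver probe
+ * routine the devres-managed variant can be used, so that the notifier is
+ * automatically unregistered on driver unbind:
+ *
+ *	ret = sdev->handle->notify_ops->devm_event_notifier_register(sdev,
+ *						proto_id, evt_id, NULL,
+ *						&foo_nb);
+ */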
+
+static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
+{
+ struct scmi_notifier_devres *dres = res;
+ struct scmi_notifier_devres *xres = data;
+
+ if (WARN_ON(!dres || !xres))
+ return 0;
+
+ return dres->proto_id == xres->proto_id &&
+ dres->evt_id == xres->evt_id &&
+ dres->nb == xres->nb &&
+ ((!dres->src_id && !xres->src_id) ||
+ (dres->src_id && xres->src_id &&
+ dres->__src_id == xres->__src_id));
+}
+
+/**
+ * scmi_devm_notifier_unregister() - Managed un-registration of a
+ * notifier_block for an event
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
+ * be used for devres accounting.
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic devres managed helper to explicitly un-register a notifier_block
+ * against a protocol event, which was previously registered using the above
+ * @scmi_devm_notifier_register.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret;
+ struct scmi_notifier_devres dres;
+
+ dres.handle = sdev->handle;
+ dres.proto_id = proto_id;
+ dres.evt_id = evt_id;
+ if (src_id) {
+ dres.__src_id = *src_id;
+ dres.src_id = &dres.__src_id;
+ } else {
+ dres.src_id = NULL;
+ }
+
+ ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
+ scmi_devm_notifier_match, &dres);
+
+ WARN_ON(ret);
+
+ return ret;
+}
+
+/**
+ * scmi_protocols_late_init() - Worker for late initialization
+ * @work: The work item to use associated to the proper SCMI instance
+ *
+ * This kicks in whenever a new protocol has completed its own registration via
+ * scmi_register_protocol_events(): it is in charge of scanning the table of
+ * pending handlers (registered by users while the related protocol was not
+ * yet initialized) and finalizing their initialization whenever possible;
+ * invalid pending handlers are purged at this point in time.
+ */
+static void scmi_protocols_late_init(struct work_struct *work)
+{
+ int bkt;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+ struct hlist_node *tmp;
+
+ ni = container_of(work, struct scmi_notify_instance, init_work);
+
+ /* Ensure protocols and events are up to date */
+ smp_rmb();
+
+ mutex_lock(&ni->pending_mtx);
+ hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev,
+ "finalized PENDING handler - key:%X\n",
+ hndl->key);
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging INVALID handler - key:%X\n",
+ hndl->key);
+ scmi_put_active_handler(ni, hndl);
+ }
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+}
+
+/*
+ * notify_ops are attached to the handle so that they can be accessed
+ * directly from an scmi_driver to register its own notifiers.
+ */
+static const struct scmi_notify_ops notify_ops = {
+ .devm_event_notifier_register = scmi_devm_notifier_register,
+ .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
+ .event_notifier_register = scmi_notifier_register,
+ .event_notifier_unregister = scmi_notifier_unregister,
+};
+
+/**
+ * scmi_notification_init() - Initializes Notification Core Support
+ * @handle: The handle identifying the platform instance to initialize
+ *
+ * This function lays out all the basic resources needed by the notification
+ * core instance identified by the provided handle: once done, all of the
+ * SCMI Protocols can register their events with the core during their own
+ * initializations.
+ *
+ * Note that failing to initialize the core notifications support does not
+ * cause the whole SCMI Protocols stack to fail its initialization.
+ *
+ * SCMI Notification Initialization happens in 2 steps:
+ * * initialization: basic common allocations (this function)
+ * * registration: protocols asynchronously come into life and registers their
+ * own supported list of events with the core; this causes
+ * further per-protocol allocations
+ *
+ * Any user's callback registration attempt referring to a not yet registered
+ * event will be registered as pending and finalized later (if possible)
+ * by the scmi_protocols_late_init() work.
+ * This allows for lazy initialization of SCMI Protocols in case of late (or
+ * missing) loading of SCMI drivers' modules.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notification_init(struct scmi_handle *handle)
+{
+ void *gid;
+ struct scmi_notify_instance *ni;
+
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ goto err;
+
+ ni->gid = gid;
+ ni->handle = handle;
+
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
+ ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!ni->notify_wq)
+ goto err;
+
+ mutex_init(&ni->pending_mtx);
+ hash_init(ni->pending_events_handlers);
+
+ INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+
+ scmi_notification_instance_data_set(handle, ni);
+ handle->notify_ops = &notify_ops;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ dev_info(handle->dev, "Core Enabled.\n");
+
+ devres_close_group(handle->dev, ni->gid);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Initialization Failed.\n");
+ devres_release_group(handle->dev, gid);
+ return -ENOMEM;
+}
+
+/**
+ * scmi_notification_exit() - Shutdown and clean Notification core
+ * @handle: The handle identifying the platform instance to shutdown
+ */
+void scmi_notification_exit(struct scmi_handle *handle)
+{
+ struct scmi_notify_instance *ni;
+
+ ni = scmi_notification_instance_data_get(handle);
+ if (!ni)
+ return;
+ scmi_notification_instance_data_set(handle, NULL);
+
+ /* Destroy while letting pending work complete */
+ destroy_workqueue(ni->notify_wq);
+
+ devres_release_group(ni->handle->dev, ni->gid);
+}
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
new file mode 100644
index 0000000000..4e9b627edf
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * notification header file containing some definitions, structures
+ * and function prototypes related to SCMI Notification handling.
+ *
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+#ifndef _SCMI_NOTIFY_H
+#define _SCMI_NOTIFY_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define SCMI_PROTO_QUEUE_SZ 4096
+
+/**
+ * struct scmi_event - Describes an event to be supported
+ * @id: Event ID
+ * @max_payld_sz: Max possible size for the payload of a notification message
+ * @max_report_sz: Max possible size for the report of a notification message
+ *
+ * Each SCMI protocol, during its initialization phase, can describe the events
+ * it wishes to support in a few struct scmi_event and pass them to the core
+ * using scmi_register_protocol_events().
+ */
+struct scmi_event {
+ u8 id;
+ size_t max_payld_sz;
+ size_t max_report_sz;
+};
+
+struct scmi_protocol_handle;
+
+/**
+ * struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @get_num_sources: Returns the number of possible events' sources for this
+ * protocol
+ * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
+ * using the proper custom protocol commands.
+ * Return 0 on Success
+ * @fill_custom_report: fills a custom event report from the provided
+ * event message payld identifying the event
+ * specific src_id.
+ *			Returns NULL on failure, otherwise @report fully
+ *			populated
+ *
+ * Context: Helpers described in &struct scmi_event_ops are called only in
+ * process context.
+ */
+struct scmi_event_ops {
+ int (*get_num_sources)(const struct scmi_protocol_handle *ph);
+ int (*set_notify_enabled)(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enabled);
+ void *(*fill_custom_report)(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id);
+};
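+
+/*
+ * A minimal protocol-side sketch (illustrative only, the foo_* identifiers
+ * are hypothetical) of how these helpers are typically wired up:
+ *
+ *	static const struct scmi_event_ops foo_event_ops = {
+ *		.get_num_sources = foo_get_num_sources,
+ *		.set_notify_enabled = foo_set_notify_enabled,
+ *		.fill_custom_report = foo_fill_custom_report,
+ *	};
+ *
+ * where each helper issues the protocol-specific SCMI commands and builds the
+ * protocol-specific report passed down the notification chains.
+ */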
+
+/**
+ * struct scmi_protocol_events - Per-protocol description of available events
+ * @queue_sz: Size in bytes of the per-protocol queue to use.
+ * @ops: Array of protocol-specific events operations.
+ * @evts: Array of supported protocol's events.
+ * @num_events: Number of supported protocol's events described in @evts.
+ * @num_sources: Number of protocol's sources, should be greater than 0; if not
+ * available at compile time, it will be provided at run-time via
+ * @get_num_sources.
+ */
+struct scmi_protocol_events {
+ size_t queue_sz;
+ const struct scmi_event_ops *ops;
+ const struct scmi_event *evts;
+ unsigned int num_events;
+ unsigned int num_sources;
+};
+
+int scmi_notification_init(struct scmi_handle *handle);
+void scmi_notification_exit(struct scmi_handle *handle);
+int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
+ const struct scmi_protocol_handle *ph,
+ const struct scmi_protocol_events *ee);
+void scmi_deregister_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id);
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts);
+
+#endif /* _SCMI_NOTIFY_H */
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
new file mode 100644
index 0000000000..e123de6e8c
--- /dev/null
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include <uapi/linux/tee.h>
+
+#include "common.h"
+
+#define SCMI_OPTEE_MAX_MSG_SIZE 128
+
+enum scmi_optee_pta_cmd {
+ /*
+ * PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
+ *
+ * [out] value[0].a: Capability bit mask (enum pta_scmi_caps)
+ * [out] value[0].b: Extended capabilities or 0
+ */
+ PTA_SCMI_CMD_CAPABILITIES = 0,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer
+ *
+ * [in] value[0].a: Channel handle
+ *
+	 * Shared memory used for SCMI message/response exchange is expected
+ * already identified and bound to channel handle in both SCMI agent
+ * and SCMI server (OP-TEE) parts.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
+ *
+ * [in] value[0].a: Channel handle
+ * [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
+ *
+ * Shared memory used for SCMI message/response is a SMT buffer
+	 * referenced by param[1]. It shall be 128 bytes large to fit the
+	 * response payload whatever the message payload size.
+ * The memory uses SMT header to carry SCMI meta-data (protocol ID and
+ * protocol message ID).
+ */
+ PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
+
+ /*
+ * PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
+ *
+	 * SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
+ *
+ * [in] value[0].a: Channel identifier
+ * [out] value[0].a: Returned channel handle
+ * [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
+ */
+ PTA_SCMI_CMD_GET_CHANNEL = 3,
+
+ /*
+ * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
+ * buffer pointed by memref parameters
+ *
+ * [in] value[0].a: Channel handle
+ * [in] memref[1]: Message buffer (MSG and SCMI payload)
+ * [out] memref[2]: Response buffer (MSG and SCMI payload)
+ *
+ * Shared memories used for SCMI message/response are MSG buffers
+ * referenced by param[1] and param[2]. MSG transport protocol
+ * uses a 32bit header to carry SCMI meta-data (protocol ID and
+ * protocol message ID) followed by the effective SCMI message
+ * payload.
+ */
+ PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
+};
+
+/*
+ * OP-TEE SCMI service capabilities bit flags (32bit)
+ *
+ * PTA_SCMI_CAPS_SMT_HEADER
+ * When set, OP-TEE supports commands using the SMT header protocol (SCMI
+ * shmem) in shared memory buffers to carry SCMI protocol synchronisation
+ * information.
+ *
+ * PTA_SCMI_CAPS_MSG_HEADER
+ * When set, OP-TEE supports commands using the MSG header protocol in an
+ * OP-TEE shared memory buffer to carry SCMI protocol synchronisation
+ * information and the SCMI message payload.
+ */
+#define PTA_SCMI_CAPS_NONE 0
+#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
+#define PTA_SCMI_CAPS_MSG_HEADER BIT(1)
+#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \
+ PTA_SCMI_CAPS_MSG_HEADER)
+
+/**
+ * struct scmi_optee_channel - Description of an OP-TEE SCMI channel
+ *
+ * @channel_id: OP-TEE channel ID used for this transport
+ * @tee_session: TEE session identifier
+ * @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
+ * @mu: Mutex protection on channel access
+ * @cinfo: SCMI channel information
+ * @shmem: Virtual base address of the shared memory
+ * @req: Shared memory protocol handle for SCMI request and synchronous response
+ * @tee_shm: TEE shared memory handle for @req or NULL if using IOMEM shmem
+ * @link: Reference in agent's channel list
+ */
+struct scmi_optee_channel {
+ u32 channel_id;
+ u32 tee_session;
+ u32 caps;
+ u32 rx_len;
+ struct mutex mu;
+ struct scmi_chan_info *cinfo;
+ union {
+ struct scmi_shared_mem __iomem *shmem;
+ struct scmi_msg_payld *msg;
+ } req;
+ struct tee_shm *tee_shm;
+ struct list_head link;
+};
+
+/**
+ * struct scmi_optee_agent - OP-TEE transport private data
+ *
+ * @dev: Device used for communication with TEE
+ * @tee_ctx: TEE context used for communication
+ * @caps: Supported channel capabilities
+ * @mu: Mutex for protection of @channel_list
+ * @channel_list: List of all created channels for the agent
+ */
+struct scmi_optee_agent {
+ struct device *dev;
+ struct tee_context *tee_ctx;
+ u32 caps;
+ struct mutex mu;
+ struct list_head channel_list;
+};
+
+/* There can be only 1 SCMI service in OP-TEE we connect to */
+static struct scmi_optee_agent *scmi_optee_private;
+
+/* Forward reference to scmi_optee transport initialization */
+static int scmi_optee_init(void);
+
+/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
+static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
+{
+ struct device *dev = agent->dev;
+ struct tee_client_device *scmi_pta = to_tee_client_device(dev);
+ struct tee_ioctl_open_session_arg arg = { };
+ int ret;
+
+ memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+ ret = tee_client_open_session(agent->tee_ctx, &arg, NULL);
+ if (ret < 0 || arg.ret) {
+ dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ *tee_session = arg.session;
+
+ return 0;
+}
+
+static void close_session(struct scmi_optee_agent *agent, u32 tee_session)
+{
+ tee_client_close_session(agent->tee_ctx, tee_session);
+}
+
+static int get_capabilities(struct scmi_optee_agent *agent)
+{
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ u32 caps;
+ u32 tee_session;
+ int ret;
+
+ ret = open_session(agent, &tee_session);
+ if (ret)
+ return ret;
+
+ arg.func = PTA_SCMI_CMD_CAPABILITIES;
+ arg.session = tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(agent->tee_ctx, &arg, param);
+
+ close_session(agent, tee_session);
+
+ if (ret < 0 || arg.ret) {
+ dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+ caps = param[0].u.value.a;
+
+ if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) {
+ dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n");
+ return -EOPNOTSUPP;
+ }
+
+ agent->caps = caps;
+
+ return 0;
+}
+
+static int get_channel(struct scmi_optee_channel *channel)
+{
+ struct device *dev = scmi_optee_private->dev;
+ struct tee_ioctl_invoke_arg arg = { };
+ struct tee_param param[1] = { };
+ unsigned int caps = 0;
+ int ret;
+
+ if (channel->tee_shm)
+ caps = PTA_SCMI_CAPS_MSG_HEADER;
+ else
+ caps = PTA_SCMI_CAPS_SMT_HEADER;
+
+ arg.func = PTA_SCMI_CMD_GET_CHANNEL;
+ arg.session = channel->tee_session;
+ arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param[0].u.value.a = channel->channel_id;
+ param[0].u.value.b = caps;
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+
+ if (ret || arg.ret) {
+ dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret);
+ return -EOPNOTSUPP;
+ }
+
+	/* From now on use the channel identifier provided by the OP-TEE SCMI service */
+ channel->channel_id = param[0].u.value.a;
+ channel->caps = caps;
+
+ return 0;
+}
+
+static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
+{
+ struct tee_ioctl_invoke_arg arg = {
+ .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
+ .session = channel->tee_session,
+ .num_params = 1,
+ };
+ struct tee_param param[1] = { };
+ int ret;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = channel->channel_id;
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret) {
+ dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+ channel->channel_id, ret, arg.ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)
+{
+ struct tee_ioctl_invoke_arg arg = {
+ .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL,
+ .session = channel->tee_session,
+ .num_params = 3,
+ };
+ struct tee_param param[3] = { };
+ int ret;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = channel->channel_id;
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[1].u.memref.shm = channel->tee_shm;
+ param[1].u.memref.size = msg_size;
+
+ param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[2].u.memref.shm = channel->tee_shm;
+ param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+
+ ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret) {
+ dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+ channel->channel_id, ret, arg.ret);
+ return -EIO;
+ }
+
+ /* Save response size */
+ channel->rx_len = param[2].u.memref.size;
+
+ return 0;
+}
+
+static int scmi_optee_link_supplier(struct device *dev)
+{
+ if (!scmi_optee_private) {
+ if (scmi_optee_init())
+ dev_dbg(dev, "Optee bus not yet ready\n");
+
+ /* Wait for optee bus */
+ return -EPROBE_DEFER;
+ }
+
+ if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev, "Adding link to supplier optee device failed\n");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
+{
+ u32 channel_id;
+
+ return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
+ idx, &channel_id);
+}
+
+static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ if (!channel->tee_shm)
+ shmem_clear_channel(channel->req.shmem);
+}
+
+static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
+{
+ const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+ void *shbuf;
+
+ channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
+ if (IS_ERR(channel->tee_shm)) {
+ dev_err(channel->cinfo->dev, "shmem allocation failed\n");
+ return -ENOMEM;
+ }
+
+ shbuf = tee_shm_get_va(channel->tee_shm, 0);
+ memset(shbuf, 0, msg_size);
+ channel->req.msg = shbuf;
+ channel->rx_len = msg_size;
+
+ return 0;
+}
+
+static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ struct device_node *np;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
+ if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(dev, "Failed to get SCMI Tx shared memory\n");
+ goto out;
+ }
+
+ size = resource_size(&res);
+
+ channel->req.shmem = devm_ioremap(dev, res.start, size);
+ if (!channel->req.shmem) {
+ dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
+ struct scmi_optee_channel *channel)
+{
+ if (of_property_present(cinfo->dev->of_node, "shmem"))
+ return setup_static_shmem(dev, cinfo, channel);
+ else
+ return setup_dynamic_shmem(dev, channel);
+}
+
+static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
+{
+ struct scmi_optee_channel *channel;
+ uint32_t channel_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id",
+ 0, &channel_id);
+ if (ret)
+ return ret;
+
+ cinfo->transport_info = channel;
+ channel->cinfo = cinfo;
+ channel->channel_id = channel_id;
+ mutex_init(&channel->mu);
+
+ ret = setup_shmem(dev, cinfo, channel);
+ if (ret)
+ return ret;
+
+ ret = open_session(scmi_optee_private, &channel->tee_session);
+ if (ret)
+ goto err_free_shm;
+
+ ret = get_channel(channel);
+ if (ret)
+ goto err_close_sess;
+
+ /* Enable polling */
+ cinfo->no_completion_irq = true;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_add(&channel->link, &scmi_optee_private->channel_list);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ return 0;
+
+err_close_sess:
+ close_session(scmi_optee_private, channel->tee_session);
+err_free_shm:
+ if (channel->tee_shm)
+ tee_shm_free(channel->tee_shm);
+
+ return ret;
+}
+
+static int scmi_optee_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_lock(&scmi_optee_private->mu);
+ list_del(&channel->link);
+ mutex_unlock(&scmi_optee_private->mu);
+
+ close_session(scmi_optee_private, channel->tee_session);
+
+ if (channel->tee_shm) {
+ tee_shm_free(channel->tee_shm);
+ channel->tee_shm = NULL;
+ }
+
+ cinfo->transport_info = NULL;
+ channel->cinfo = NULL;
+
+ return 0;
+}
+
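+/*
+ * Send an SCMI command through the OP-TEE service. The channel mutex is
+ * taken here and, since this transport completes synchronous commands on
+ * return (sync_cmds_completed_on_ret), it is released on success only from
+ * scmi_optee_mark_txdone() once the core is done with the response.
+ */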
+static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+ int ret;
+
+ mutex_lock(&channel->mu);
+
+ if (channel->tee_shm) {
+ msg_tx_prepare(channel->req.msg, xfer);
+ ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
+ } else {
+ shmem_tx_prepare(channel->req.shmem, xfer, cinfo);
+ ret = invoke_process_smt_channel(channel);
+ }
+
+ if (ret)
+ mutex_unlock(&channel->mu);
+
+ return ret;
+}
+
+static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ if (channel->tee_shm)
+ msg_fetch_response(channel->req.msg, channel->rx_len, xfer);
+ else
+ shmem_fetch_response(channel->req.shmem, xfer);
+}
+
+static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
+{
+ struct scmi_optee_channel *channel = cinfo->transport_info;
+
+ mutex_unlock(&channel->mu);
+}
+
+static struct scmi_transport_ops scmi_optee_ops = {
+ .link_supplier = scmi_optee_link_supplier,
+ .chan_available = scmi_optee_chan_available,
+ .chan_setup = scmi_optee_chan_setup,
+ .chan_free = scmi_optee_chan_free,
+ .send_message = scmi_optee_send_message,
+ .mark_txdone = scmi_optee_mark_txdone,
+ .fetch_response = scmi_optee_fetch_response,
+ .clear_channel = scmi_optee_clear_channel,
+};
+
+static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return ver->impl_id == TEE_IMPL_ID_OPTEE;
+}
+
+static int scmi_optee_service_probe(struct device *dev)
+{
+ struct scmi_optee_agent *agent;
+ struct tee_context *tee_ctx;
+ int ret;
+
+ /* Only one SCMI OP-TEE device allowed */
+ if (scmi_optee_private) {
+ dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n");
+ return -EBUSY;
+ }
+
+ tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL);
+ if (IS_ERR(tee_ctx))
+ return -ENODEV;
+
+ agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL);
+ if (!agent) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ agent->dev = dev;
+ agent->tee_ctx = tee_ctx;
+ INIT_LIST_HEAD(&agent->channel_list);
+ mutex_init(&agent->mu);
+
+ ret = get_capabilities(agent);
+ if (ret)
+ goto err;
+
+	/* Ensure agent resources are all visible before scmi_optee_private is set */
+ smp_mb();
+ scmi_optee_private = agent;
+
+ return 0;
+
+err:
+ tee_client_close_context(tee_ctx);
+
+ return ret;
+}
+
+static int scmi_optee_service_remove(struct device *dev)
+{
+ struct scmi_optee_agent *agent = scmi_optee_private;
+
+ if (!scmi_optee_private)
+ return -EINVAL;
+
+ if (!list_empty(&scmi_optee_private->channel_list))
+ return -EBUSY;
+
+ /* Ensure cleared reference is visible before resources are released */
+ smp_store_mb(scmi_optee_private, NULL);
+
+ tee_client_close_context(agent->tee_ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id scmi_optee_service_id[] = {
+ {
+ UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e,
+ 0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99)
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
+
+static struct tee_client_driver scmi_optee_driver = {
+ .id_table = scmi_optee_service_id,
+ .driver = {
+ .name = "scmi-optee",
+ .bus = &tee_bus_type,
+ .probe = scmi_optee_service_probe,
+ .remove = scmi_optee_service_remove,
+ },
+};
+
+static int scmi_optee_init(void)
+{
+ return driver_register(&scmi_optee_driver.driver);
+}
+
+static void scmi_optee_exit(void)
+{
+ if (scmi_optee_private)
+ driver_unregister(&scmi_optee_driver.driver);
+}
+
+const struct scmi_desc scmi_optee_desc = {
+ .transport_exit = scmi_optee_exit,
+ .ops = &scmi_optee_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .sync_cmds_completed_on_ret = true,
+};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
new file mode 100644
index 0000000000..dd344506b0
--- /dev/null
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Performance Protocol
+ *
+ * Copyright (C) 2018-2023 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
+#include <linux/bits.h>
+#include <linux/hashtable.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
+#include <linux/sort.h>
+#include <linux/xarray.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+#define MAX_OPPS 16
+
+enum scmi_performance_protocol_cmd {
+ PERF_DOMAIN_ATTRIBUTES = 0x3,
+ PERF_DESCRIBE_LEVELS = 0x4,
+ PERF_LIMITS_SET = 0x5,
+ PERF_LIMITS_GET = 0x6,
+ PERF_LEVEL_SET = 0x7,
+ PERF_LEVEL_GET = 0x8,
+ PERF_NOTIFY_LIMITS = 0x9,
+ PERF_NOTIFY_LEVEL = 0xa,
+ PERF_DESCRIBE_FASTCHANNEL = 0xb,
+ PERF_DOMAIN_NAME_GET = 0xc,
+};
+
+enum {
+ PERF_FC_LEVEL,
+ PERF_FC_LIMIT,
+ PERF_FC_MAX,
+};
+
+struct scmi_opp {
+ u32 perf;
+ u32 power;
+ u32 trans_latency_us;
+ u32 indicative_freq;
+ u32 level_index;
+ struct hlist_node hash;
+};
+
+struct scmi_msg_resp_perf_attributes {
+ __le16 num_domains;
+ __le16 flags;
+#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
+#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
+ __le32 stats_addr_low;
+ __le32 stats_addr_high;
+ __le32 stats_size;
+};
+
+struct scmi_msg_resp_perf_domain_attributes {
+ __le32 flags;
+#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
+#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
+#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
+#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
+#define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25))
+ __le32 rate_limit_us;
+ __le32 sustained_freq_khz;
+ __le32 sustained_perf_level;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_perf_describe_levels {
+ __le32 domain;
+ __le32 level_index;
+};
+
+struct scmi_perf_set_limits {
+ __le32 domain;
+ __le32 max_level;
+ __le32 min_level;
+};
+
+struct scmi_perf_get_limits {
+ __le32 max_level;
+ __le32 min_level;
+};
+
+struct scmi_perf_set_level {
+ __le32 domain;
+ __le32 level;
+};
+
+struct scmi_perf_notify_level_or_limits {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_perf_limits_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 range_max;
+ __le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 performance_level;
+};
+
+struct scmi_msg_resp_perf_describe_levels {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct {
+ __le32 perf_val;
+ __le32 power;
+ __le16 transition_latency_us;
+ __le16 reserved;
+ } opp[];
+};
+
+struct scmi_msg_resp_perf_describe_levels_v4 {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct {
+ __le32 perf_val;
+ __le32 power;
+ __le16 transition_latency_us;
+ __le16 reserved;
+ __le32 indicative_freq;
+ __le32 level_index;
+ } opp[];
+};
+
+struct perf_dom_info {
+ u32 id;
+ bool set_limits;
+ bool perf_limit_notify;
+ bool perf_level_notify;
+ bool perf_fastchannels;
+ bool level_indexing_mode;
+ u32 opp_count;
+ u32 sustained_freq_khz;
+ u32 sustained_perf_level;
+ unsigned long mult_factor;
+ struct scmi_perf_domain_info info;
+ struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
+ struct xarray opps_by_idx;
+ struct xarray opps_by_lvl;
+ DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
+};
+
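+/*
+ * Lookup an OPP by indicative frequency in the per-domain hashtable;
+ * evaluates to the matching scmi_opp or NULL when no entry matches.
+ */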
+#define LOOKUP_BY_FREQ(__htp, __freq) \
+({ \
+ /* u32 cast is needed to pick right hash func */ \
+ u32 f_ = (u32)(__freq); \
+ struct scmi_opp *_opp; \
+ \
+ hash_for_each_possible((__htp), _opp, hash, f_) \
+ if (_opp->indicative_freq == f_) \
+ break; \
+ _opp; \
+})
+
+struct scmi_perf_info {
+ u32 version;
+ u16 num_domains;
+ enum scmi_power_scale power_scale;
+ u64 stats_addr;
+ u32 stats_size;
+ struct perf_dom_info *dom_info;
+};
+
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+ PERF_NOTIFY_LIMITS,
+ PERF_NOTIFY_LEVEL,
+};
+
+static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_perf_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_perf_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u16 flags = le16_to_cpu(attr->flags);
+
+ pi->num_domains = le16_to_cpu(attr->num_domains);
+
+ if (POWER_SCALE_IN_MILLIWATT(flags))
+ pi->power_scale = SCMI_POWER_MILLIWATTS;
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
+ if (POWER_SCALE_IN_MICROWATT(flags))
+ pi->power_scale = SCMI_POWER_MICROWATTS;
+
+ pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+ (u64)le32_to_cpu(attr->stats_addr_high) << 32;
+ pi->stats_size = le32_to_cpu(attr->stats_size);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static void scmi_perf_xa_destroy(void *data)
+{
+ int domain;
+ struct scmi_perf_info *pinfo = data;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
+ xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
+ }
+}
+
+static int
+scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom_info,
+ u32 version)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_perf_domain_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
+ sizeof(dom_info->id), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(dom_info->id, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(attr->flags);
+
+ dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
+ dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
+ dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
+ if (PROTOCOL_REV_MAJOR(version) >= 0x4)
+ dom_info->level_indexing_mode =
+ SUPPORTS_LEVEL_INDEXING(flags);
+ dom_info->sustained_freq_khz =
+ le32_to_cpu(attr->sustained_freq_khz);
+ dom_info->sustained_perf_level =
+ le32_to_cpu(attr->sustained_perf_level);
+ if (!dom_info->sustained_freq_khz ||
+ !dom_info->sustained_perf_level ||
+ dom_info->level_indexing_mode)
+ /* CPUFreq converts to kHz, hence default 1000 */
+ dom_info->mult_factor = 1000;
+ else
+ dom_info->mult_factor =
+ (dom_info->sustained_freq_khz * 1000UL)
+ / dom_info->sustained_perf_level;
+ strscpy(dom_info->info.name, attr->name,
+ SCMI_SHORT_NAME_MAX_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+	/*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error, just carry on and use the already provided short name.
+	 */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
+ dom_info->id, dom_info->info.name,
+ SCMI_MAX_STR_SIZE);
+
+ if (dom_info->level_indexing_mode) {
+ xa_init(&dom_info->opps_by_idx);
+ xa_init(&dom_info->opps_by_lvl);
+ hash_init(dom_info->opps_by_freq);
+ }
+
+ return ret;
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+ const struct scmi_opp *t1 = opp1, *t2 = opp2;
+
+ return t1->perf - t2->perf;
+}
+
+struct scmi_perf_ipriv {
+ u32 version;
+ struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_perf_describe_levels *msg = message;
+ const struct scmi_perf_ipriv *p = priv;
+
+ msg->domain = cpu_to_le32(p->perf_dom->id);
+ /* Set the number of OPPs to be skipped/already read */
+ msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
+
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
+
+ return 0;
+}
+
+static inline void
+process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels *r)
+{
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+}
+
+static inline void
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
+ const struct scmi_msg_resp_perf_describe_levels_v4 *r)
+{
+ opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+	/* Note that PERF v4 always reports five 32-bit words per OPP entry */
+ opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
+ if (dom->level_indexing_mode) {
+ int ret;
+
+ opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
+
+ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+ GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_idx at %d - ret:%d\n",
+ opp->level_index, ret);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
+
+ hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
+ }
+}
+
+static int
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ struct scmi_opp *opp;
+ struct scmi_perf_ipriv *p = priv;
+
+ opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
+ process_response_opp(opp, st->loop_idx, response);
+ else
+ process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
+ response);
+ p->perf_dom->opp_count++;
+
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
+ opp->perf, opp->power, opp->trans_latency_us,
+ opp->indicative_freq, opp->level_index);
+
+ return 0;
+}
+
+static int
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *perf_dom, u32 version)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_perf_levels_prepare_message,
+ .update_state = iter_perf_levels_update_state,
+ .process_response = iter_perf_levels_process_response,
+ };
+ struct scmi_perf_ipriv ppriv = {
+ .version = version,
+ .perf_dom = perf_dom,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+ PERF_DESCRIBE_LEVELS,
+ sizeof(struct scmi_msg_perf_describe_levels),
+ &ppriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
+
+ if (perf_dom->opp_count)
+ sort(perf_dom->opp, perf_dom->opp_count,
+ sizeof(struct scmi_opp), opp_cmp_func, NULL);
+
+ return ret;
+}
+
+static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static inline struct perf_dom_info *
+scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ if (domain >= pi->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return pi->dom_info + domain;
+}
+
+static const struct scmi_perf_domain_info *
+scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return ERR_PTR(-EINVAL);
+
+ return &dom->info;
+}
+
+static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_set_limits *limits;
+
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
+ sizeof(*limits), 0, &t);
+ if (ret)
+ return ret;
+
+ limits = t->tx.buf;
+ limits->domain = cpu_to_le32(domain);
+ limits->max_level = cpu_to_le32(max_perf);
+ limits->min_level = cpu_to_le32(min_perf);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 max_perf,
+ u32 min_perf)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
+ dom->id, min_perf, max_perf);
+ iowrite32(max_perf, fci->set_addr);
+ iowrite32(min_perf, fci->set_addr + 4);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
+}
+
+static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 max_perf, u32 min_perf)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+ return -EINVAL;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ if (min_perf) {
+ opp = xa_load(&dom->opps_by_lvl, min_perf);
+ if (!opp)
+ return -EIO;
+
+ min_perf = opp->level_index;
+ }
+
+ if (max_perf) {
+ opp = xa_load(&dom->opps_by_lvl, max_perf);
+ if (!opp)
+ return -EIO;
+
+ max_perf = opp->level_index;
+ }
+ }
+
+ return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
+}
+
+static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_get_limits *limits;
+
+ ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ limits = t->rx.buf;
+
+ *max_perf = le32_to_cpu(limits->max_level);
+ *min_perf = le32_to_cpu(limits->min_level);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 *max_perf,
+ u32 *min_perf)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
+
+ *max_perf = ioread32(fci->get_addr);
+ *min_perf = ioread32(fci->get_addr + 4);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
+ dom->id, *min_perf, *max_perf);
+ return 0;
+ }
+
+ return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
+}
+
+static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *max_perf, u32 *min_perf)
+{
+ int ret;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
+ if (ret)
+ return ret;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, *min_perf);
+ if (!opp)
+ return -EIO;
+
+ *min_perf = opp->perf;
+
+ opp = xa_load(&dom->opps_by_idx, *max_perf);
+ if (!opp)
+ return -EIO;
+
+ *max_perf = opp->perf;
+ }
+
+ return 0;
+}
+
+static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_set_level *lvl;
+
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
+ if (ret)
+ return ret;
+
+ t->hdr.poll_completion = poll;
+ lvl = t->tx.buf;
+ lvl->domain = cpu_to_le32(domain);
+ lvl->level = cpu_to_le32(level);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 level,
+ bool poll)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
+ struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
+ dom->id, level, 0);
+ iowrite32(level, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_perf_msg_level_set(ph, dom->id, level, poll);
+}
+
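+/*
+ * When level-indexing mode is supported (PERF v4), the abstract performance
+ * level is translated into the corresponding firmware level index before
+ * issuing the request; the GET path below applies the reverse mapping.
+ */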
+static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 level, bool poll)
+{
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_lvl, level);
+ if (!opp)
+ return -EIO;
+
+ level = opp->level_index;
+ }
+
+ return __scmi_perf_level_set(ph, dom, level, poll);
+}
+
+static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
+ sizeof(u32), sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ t->hdr.poll_completion = poll;
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *level = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ struct perf_dom_info *dom, u32 *level,
+ bool poll)
+{
+ if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
+ *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
+ dom->id, *level, 0);
+ return 0;
+ }
+
+ return scmi_perf_msg_level_get(ph, dom->id, level, poll);
+}
+
+static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *level, bool poll)
+{
+ int ret;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ ret = __scmi_perf_level_get(ph, dom, level, poll);
+ if (ret)
+ return ret;
+
+ if (dom->level_indexing_mode) {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, *level);
+ if (!opp)
+ return -EIO;
+
+ *level = opp->perf;
+ }
+
+ return 0;
+}
+
+static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_notify_level_or_limits *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
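+/*
+ * Discover and map the per-domain fastchannels: the LEVEL channels carry a
+ * single 32-bit value, while the LIMITS channels are 8 bytes wide with
+ * max_level at offset 0 and min_level at offset 4, matching the accesses
+ * done by the fastchannel paths above.
+ */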
+static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, domain,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_GET, 4, domain,
+ &fc[PERF_FC_LEVEL].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, domain,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db);
+
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_GET, 8, domain,
+ &fc[PERF_FC_LIMIT].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+/* Device specific ops */
+static int scmi_dev_domain_id(struct device *dev)
+{
+ struct of_phandle_args clkspec;
+
+ if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+ 0, &clkspec))
+ return -EINVAL;
+
+ return clkspec.args[0];
+}
+
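+/*
+ * Register the domain OPPs with the OPP core. Frequencies are derived from
+ * the performance level scaled by mult_factor or, in level-indexing mode,
+ * from the indicative frequency scaled by the default factor of 1000
+ * (kHz to Hz).
+ */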
+static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
+ struct device *dev)
+{
+ int idx, ret, domain;
+ unsigned long freq;
+ struct perf_dom_info *dom;
+
+ domain = scmi_dev_domain_id(dev);
+ if (domain < 0)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ for (idx = 0; idx < dom->opp_count; idx++) {
+ if (!dom->level_indexing_mode)
+ freq = dom->opp[idx].perf * dom->mult_factor;
+ else
+ freq = dom->opp[idx].indicative_freq * dom->mult_factor;
+
+ ret = dev_pm_opp_add(dev, freq, 0);
+ if (ret) {
+ dev_warn(dev, "failed to add opp %luHz\n", freq);
+ dev_pm_opp_remove_all_dynamic(dev);
+ return ret;
+ }
+
+ dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
+ domain, dom->info.name, idx, freq);
+ }
+ return 0;
+}
+
+static int
+scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
+ struct device *dev)
+{
+ int domain;
+ struct perf_dom_info *dom;
+
+ domain = scmi_dev_domain_id(dev);
+ if (domain < 0)
+ return -EINVAL;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+	/* us to ns */
+ return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+}
+
+static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
+ unsigned long freq, bool poll)
+{
+ unsigned int level;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ if (!dom->level_indexing_mode) {
+ level = freq / dom->mult_factor;
+ } else {
+ struct scmi_opp *opp;
+
+ opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
+ freq / dom->mult_factor);
+ if (!opp)
+ return -EIO;
+
+ level = opp->level_index;
+ }
+
+ return __scmi_perf_level_set(ph, dom, level, poll);
+}
+
+static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
+ unsigned long *freq, bool poll)
+{
+ int ret;
+ u32 level;
+ struct perf_dom_info *dom;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ ret = __scmi_perf_level_get(ph, dom, &level, poll);
+ if (ret)
+ return ret;
+
+ if (!dom->level_indexing_mode) {
+ *freq = level * dom->mult_factor;
+ } else {
+ struct scmi_opp *opp;
+
+ opp = xa_load(&dom->opps_by_idx, level);
+ if (!opp)
+ return -EIO;
+
+ *freq = opp->indicative_freq * dom->mult_factor;
+ }
+
+ return ret;
+}
+
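+/*
+ * Estimate the power for a requested frequency: walk the OPP list, sorted by
+ * ascending performance level, and pick the first OPP whose frequency is not
+ * below the request, updating both *freq and *power from it.
+ */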
+static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
+ u32 domain, unsigned long *freq,
+ unsigned long *power)
+{
+ struct perf_dom_info *dom;
+ unsigned long opp_freq;
+ int idx, ret = -EINVAL;
+ struct scmi_opp *opp;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
+
+ for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ if (!dom->level_indexing_mode)
+ opp_freq = opp->perf * dom->mult_factor;
+ else
+ opp_freq = opp->indicative_freq * dom->mult_factor;
+
+ if (opp_freq < *freq)
+ continue;
+
+ *freq = opp_freq;
+ *power = opp->power;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
+ struct device *dev)
+{
+ int domain;
+ struct perf_dom_info *dom;
+
+ domain = scmi_dev_domain_id(dev);
+ if (domain < 0)
+ return false;
+
+ dom = scmi_perf_domain_lookup(ph, domain);
+ if (IS_ERR(dom))
+ return false;
+
+ return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
+}
+
+static enum scmi_power_scale
+scmi_power_scale_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ return pi->power_scale;
+}
+
+static const struct scmi_perf_proto_ops perf_proto_ops = {
+ .num_domains_get = scmi_perf_num_domains_get,
+ .info_get = scmi_perf_info_get,
+ .limits_set = scmi_perf_limits_set,
+ .limits_get = scmi_perf_limits_get,
+ .level_set = scmi_perf_level_set,
+ .level_get = scmi_perf_level_get,
+ .device_domain_id = scmi_dev_domain_id,
+ .transition_latency_get = scmi_dvfs_transition_latency_get,
+ .device_opps_add = scmi_dvfs_device_opps_add,
+ .freq_set = scmi_dvfs_freq_set,
+ .freq_get = scmi_dvfs_freq_get,
+ .est_power_get = scmi_dvfs_est_power_get,
+ .fast_switch_possible = scmi_fast_switch_possible,
+ .power_scale_get = scmi_power_scale_get,
+};
+
+static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+ {
+ const struct scmi_perf_limits_notify_payld *p = payld;
+ struct scmi_perf_limits_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->range_max = le32_to_cpu(p->range_max);
+ r->range_min = le32_to_cpu(p->range_min);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+ {
+ const struct scmi_perf_level_notify_payld *p = payld;
+ struct scmi_perf_level_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->performance_level = le32_to_cpu(p->performance_level);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_perf_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event perf_events[] = {
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_limits_report),
+ },
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_level_report),
+ },
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+ .get_num_sources = scmi_perf_get_num_sources,
+ .set_notify_enabled = scmi_perf_set_notify_enabled,
+ .fill_custom_report = scmi_perf_fill_custom_report,
+};
+
+static const struct scmi_protocol_events perf_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &perf_event_ops,
+ .evts = perf_events,
+ .num_events = ARRAY_SIZE(perf_events),
+};
+
+static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct scmi_perf_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Performance Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ ret = scmi_perf_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct perf_dom_info *dom = pinfo->dom_info + domain;
+
+ dom->id = domain;
+ scmi_perf_domain_attributes_get(ph, dom, version);
+ scmi_perf_describe_levels_get(ph, dom, version);
+
+ if (dom->perf_fastchannels)
+ scmi_perf_domain_init_fc(ph, dom->id, &dom->fc_info);
+ }
+
+ ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_perf = {
+ .id = SCMI_PROTOCOL_PERF,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_perf_protocol_init,
+ .ops = &perf_proto_ops,
+ .events = &perf_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
new file mode 100644
index 0000000000..356e836316
--- /dev/null
+++ b/drivers/firmware/arm_scmi/power.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Power Protocol
+ *
+ * Copyright (C) 2018-2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_power_protocol_cmd {
+ POWER_DOMAIN_ATTRIBUTES = 0x3,
+ POWER_STATE_SET = 0x4,
+ POWER_STATE_GET = 0x5,
+ POWER_STATE_NOTIFY = 0x6,
+ POWER_DOMAIN_NAME_GET = 0x8,
+};
+
+struct scmi_msg_resp_power_attributes {
+ __le16 num_domains;
+ __le16 reserved;
+ __le32 stats_addr_low;
+ __le32 stats_addr_high;
+ __le32 stats_size;
+};
+
+struct scmi_msg_resp_power_domain_attributes {
+ __le32 flags;
+#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
+#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_power_set_state {
+ __le32 flags;
+#define STATE_SET_ASYNC BIT(0)
+ __le32 domain;
+ __le32 state;
+};
+
+struct scmi_power_state_notify {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_power_state_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_state;
+};
+
+struct power_dom_info {
+ bool state_set_sync;
+ bool state_set_async;
+ bool state_set_notify;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_info {
+ u32 version;
+ int num_domains;
+ u64 stats_addr;
+ u32 stats_size;
+ struct power_dom_info *dom_info;
+};
+
+static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_power_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_power_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ pi->num_domains = le16_to_cpu(attr->num_domains);
+ pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+ (u64)le32_to_cpu(attr->stats_addr_high) << 32;
+ pi->stats_size = le32_to_cpu(attr->stats_size);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct power_dom_info *dom_info,
+ u32 version)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_power_domain_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, POWER_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(attr->flags);
+
+ dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+ dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
+ dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
+ }
+ ph->xops->xfer_put(ph, t);
+
+	/*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error, just carry on and use the already provided short name.
+	 */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(flags)) {
+ ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+ }
+
+ return ret;
+}
+
+static int scmi_power_state_set(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_set_state *st;
+
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_SET, sizeof(*st), 0, &t);
+ if (ret)
+ return ret;
+
+ st = t->tx.buf;
+ st->flags = cpu_to_le32(0);
+ st->domain = cpu_to_le32(domain);
+ st->state = cpu_to_le32(state);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_power_state_get(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *state)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_GET, sizeof(u32), sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *state = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_power_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const char *
+scmi_power_name_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ struct scmi_power_info *pi = ph->get_priv(ph);
+ struct power_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static const struct scmi_power_proto_ops power_proto_ops = {
+ .num_domains_get = scmi_power_num_domains_get,
+ .name_get = scmi_power_name_get,
+ .state_set = scmi_power_state_set,
+ .state_get = scmi_power_state_get,
+};
+
+static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_state_notify *notify;
+
+ ret = ph->xops->xfer_get_init(ph, POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_power_request_notify(ph, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *
+scmi_power_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_power_state_notify_payld *p = payld;
+ struct scmi_power_state_changed_report *r = report;
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_state = le32_to_cpu(p->power_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static int scmi_power_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_power_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
+static const struct scmi_event power_events[] = {
+ {
+ .id = SCMI_EVENT_POWER_STATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_power_state_changed_report),
+ },
+};
+
+static const struct scmi_event_ops power_event_ops = {
+ .get_num_sources = scmi_power_get_num_sources,
+ .set_notify_enabled = scmi_power_set_notify_enabled,
+ .fill_custom_report = scmi_power_fill_custom_report,
+};
+
+static const struct scmi_protocol_events power_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &power_event_ops,
+ .evts = power_events,
+ .num_events = ARRAY_SIZE(power_events),
+};
+
+static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct scmi_power_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Power Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_power_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct power_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_power_domain_attributes_get(ph, domain, dom, version);
+ }
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_power = {
+ .id = SCMI_PROTOCOL_POWER,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_power_protocol_init,
+ .ops = &power_proto_ops,
+ .events = &power_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power)
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
new file mode 100644
index 0000000000..244929cb4f
--- /dev/null
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -0,0 +1,989 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Powercap Protocol
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include <trace/events/scmi.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_powercap_protocol_cmd {
+ POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
+ POWERCAP_CAP_GET = 0x4,
+ POWERCAP_CAP_SET = 0x5,
+ POWERCAP_PAI_GET = 0x6,
+ POWERCAP_PAI_SET = 0x7,
+ POWERCAP_DOMAIN_NAME_GET = 0x8,
+ POWERCAP_MEASUREMENTS_GET = 0x9,
+ POWERCAP_CAP_NOTIFY = 0xa,
+ POWERCAP_MEASUREMENTS_NOTIFY = 0xb,
+ POWERCAP_DESCRIBE_FASTCHANNEL = 0xc,
+};
+
+enum {
+ POWERCAP_FC_CAP,
+ POWERCAP_FC_PAI,
+ POWERCAP_FC_MAX,
+};
+
+struct scmi_msg_resp_powercap_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30))
+#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28))
+#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27))
+#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26))
+#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25))
+#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22))
+#define POWERCAP_POWER_UNIT(x) \
+ (FIELD_GET(GENMASK(24, 23), (x)))
+#define SUPPORTS_POWER_UNITS_MW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x2)
+#define SUPPORTS_POWER_UNITS_UW(x) \
+ (POWERCAP_POWER_UNIT(x) == 0x1)
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 min_pai;
+ __le32 max_pai;
+ __le32 pai_step;
+ __le32 min_power_cap;
+ __le32 max_power_cap;
+ __le32 power_cap_step;
+ __le32 sustainable_power;
+ __le32 accuracy;
+ __le32 parent_id;
+};
+
+struct scmi_msg_powercap_set_cap_or_pai {
+ __le32 domain;
+ __le32 flags;
+#define CAP_SET_ASYNC BIT(1)
+#define CAP_SET_IGNORE_DRESP BIT(0)
+ __le32 value;
+};
+
+struct scmi_msg_resp_powercap_cap_set_complete {
+ __le32 domain;
+ __le32 power_cap;
+};
+
+struct scmi_msg_resp_powercap_meas_get {
+ __le32 power;
+ __le32 pai;
+};
+
+struct scmi_msg_powercap_notify_cap {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_msg_powercap_notify_thresh {
+ __le32 domain;
+ __le32 notify_enable;
+ __le32 power_thresh_low;
+ __le32 power_thresh_high;
+};
+
+struct scmi_powercap_cap_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_cap;
+ __le32 pai;
+};
+
+struct scmi_powercap_meas_changed_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power;
+};
+
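+/*
+ * Per-domain driver state: 'thresholds' packs the measurement notification
+ * thresholds with the low value in bits[31:0] and the high value in
+ * bits[63:32], as retrieved by the THRESH_LOW/THRESH_HIGH helpers.
+ */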
+struct scmi_powercap_state {
+ bool enabled;
+ u32 last_pcap;
+ bool meas_notif_enabled;
+ u64 thresholds;
+#define THRESH_LOW(p, id) \
+ (lower_32_bits((p)->states[(id)].thresholds))
+#define THRESH_HIGH(p, id) \
+ (upper_32_bits((p)->states[(id)].thresholds))
+};
+
+struct powercap_info {
+ u32 version;
+ int num_domains;
+ struct scmi_powercap_state *states;
+ struct scmi_powercap_info *powercaps;
+};
+
+static enum scmi_powercap_protocol_cmd evt_2_cmd[] = {
+ POWERCAP_CAP_NOTIFY,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+};
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable);
+
+static int
+scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 attributes;
+
+ attributes = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
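+/*
+ * Sanity check the PAI/CAP ranges reported by the platform: both bounds must
+ * be non-zero, a configurable quantity must expose a non-degenerate
+ * [min, max] range with a non-zero step, and a non-configurable one must
+ * report min equal to max.
+ */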
+static inline int
+scmi_powercap_validate(unsigned int min_val, unsigned int max_val,
+ unsigned int step_val, bool configurable)
+{
+ if (!min_val || !max_val)
+ return -EPROTO;
+
+ if ((configurable && min_val == max_val) ||
+ (!configurable && min_val != max_val))
+ return -EPROTO;
+
+ if (min_val != max_val && !step_val)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pinfo, u32 domain)
+{
+ int ret;
+ u32 flags;
+ struct scmi_xfer *t;
+ struct scmi_powercap_info *dom_info = pinfo->powercaps + domain;
+ struct scmi_msg_resp_powercap_domain_attributes *resp;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ resp = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ flags = le32_to_cpu(resp->attributes);
+
+ dom_info->id = domain;
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ dom_info->async_powercap_cap_set =
+ SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
+ dom_info->powercap_cap_config =
+ SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags);
+ dom_info->powercap_monitoring =
+ SUPPORTS_POWERCAP_MONITORING(flags);
+ dom_info->powercap_pai_config =
+ SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags);
+ dom_info->powercap_scale_mw =
+ SUPPORTS_POWER_UNITS_MW(flags);
+ dom_info->powercap_scale_uw =
+ SUPPORTS_POWER_UNITS_UW(flags);
+ dom_info->fastchannels =
+ SUPPORTS_POWERCAP_FASTCHANNELS(flags);
+
+ strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ dom_info->min_pai = le32_to_cpu(resp->min_pai);
+ dom_info->max_pai = le32_to_cpu(resp->max_pai);
+ dom_info->pai_step = le32_to_cpu(resp->pai_step);
+ ret = scmi_powercap_validate(dom_info->min_pai,
+ dom_info->max_pai,
+ dom_info->pai_step,
+ dom_info->powercap_pai_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent PAI config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap);
+ dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap);
+ dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step);
+ ret = scmi_powercap_validate(dom_info->min_power_cap,
+ dom_info->max_power_cap,
+ dom_info->power_cap_step,
+ dom_info->powercap_cap_config);
+ if (ret) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent CAP config for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ goto clean;
+ }
+
+ dom_info->sustainable_power =
+ le32_to_cpu(resp->sustainable_power);
+ dom_info->accuracy = le32_to_cpu(resp->accuracy);
+
+ dom_info->parent_id = le32_to_cpu(resp->parent_id);
+ if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID &&
+ (dom_info->parent_id >= pinfo->num_domains ||
+ dom_info->parent_id == dom_info->id)) {
+ dev_err(ph->dev,
+ "Platform reported inconsistent parent ID for domain %d - %s\n",
+ dom_info->id, dom_info->name);
+ ret = -ENODEV;
+ }
+ }
+
+clean:
+ ph->xops->xfer_put(ph, t);
+
+	/*
+	 * If supported, overwrite the short name with the extended one;
+	 * on error, just carry on and use the already provided short name.
+	 */
+ if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
+ ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
+ domain, dom_info->name,
+ SCMI_MAX_STR_SIZE);
+
+ return ret;
+}
+
+static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const struct scmi_powercap_info *
+scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains)
+ return NULL;
+
+ return pi->powercaps + domain_id;
+}
+
+static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *power_cap = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int __scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *dom,
+ u32 *power_cap)
+{
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
+ *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
+ dom->id, *power_cap, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_cap_get(ph, dom->id, power_cap);
+}
+
+static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ const struct scmi_powercap_info *dom;
+
+ if (!power_cap)
+ return -EINVAL;
+
+ dom = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!dom)
+ return -EINVAL;
+
+ return __scmi_powercap_cap_get(ph, dom, power_cap);
+}
+
+static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *pc,
+ u32 power_cap, bool ignore_dresp)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(pc->id);
+ msg->flags =
+ cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
+ FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
+ msg->value = cpu_to_le32(power_cap);
+
+ if (!pc->async_powercap_cap_set || ignore_dresp) {
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_powercap_cap_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain) == pc->id)
+ dev_dbg(ph->dev,
+ "Powercap ID %d CAP set async to %u\n",
+ pc->id,
+ get_unaligned_le32(&resp->power_cap));
+ else
+ ret = -EPROTO;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int __scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi, u32 domain_id,
+ u32 power_cap, bool ignore_dresp)
+{
+ int ret = -EINVAL;
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_cap_config)
+ return ret;
+
+ if (power_cap &&
+ (power_cap < pc->min_power_cap || power_cap > pc->max_power_cap))
+ return ret;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
+
+ iowrite32(power_cap, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
+ domain_id, power_cap, 0);
+ ret = 0;
+ } else {
+ ret = scmi_powercap_xfer_cap_set(ph, pc, power_cap,
+ ignore_dresp);
+ }
+
+ /* Save the last explicitly set non-zero powercap value */
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 && !ret && power_cap)
+ pi->states[domain_id].last_pcap = power_cap;
+
+ return ret;
+}
+
+static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_cap,
+ bool ignore_dresp)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+	/*
+	 * Disallow zero as an explicitly requested powercap: the dedicated
+	 * enable/disable operations cover that case.
+	 */
+ if (!power_cap)
+ return -EINVAL;
+
+ /* Just log the last set request if acting on a disabled domain */
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 &&
+ !pi->states[domain_id].enabled) {
+ pi->states[domain_id].last_pcap = power_cap;
+ return 0;
+ }
+
+ return __scmi_powercap_cap_set(ph, pi, domain_id,
+ power_cap, ignore_dresp);
+}
+
+static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32),
+ sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *pai = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *pai)
+{
+ struct scmi_powercap_info *dom;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pai || domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ dom = pi->powercaps + domain_id;
+ if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) {
+ *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr);
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET,
+ domain_id, *pai, 0);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_get(ph, domain_id, pai);
+}
+
+static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_powercap_set_cap_or_pai *msg;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->domain = cpu_to_le32(domain_id);
+ msg->flags = cpu_to_le32(0);
+ msg->value = cpu_to_le32(pai);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 pai)
+{
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_pai_config || !pai ||
+ pai < pc->min_pai || pai > pc->max_pai)
+ return -EINVAL;
+
+ if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) {
+ struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI];
+
+ trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET,
+ domain_id, pai, 0);
+ iowrite32(pai, fci->set_addr);
+ ph->hops->fastchannel_db_ring(fci->set_db);
+ return 0;
+ }
+
+ return scmi_powercap_xfer_pai_set(ph, domain_id, pai);
+}
+
+static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power,
+ u32 *pai)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_powercap_meas_get *resp;
+ const struct scmi_powercap_info *pc;
+
+ pc = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!pc || !pc->powercap_monitoring || !pai || !average_power)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET,
+ sizeof(u32), sizeof(*resp), &t);
+ if (ret)
+ return ret;
+
+ resp = t->rx.buf;
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ *average_power = le32_to_cpu(resp->power);
+ *pai = le32_to_cpu(resp->pai);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!power_thresh_low || !power_thresh_high ||
+ domain_id >= pi->num_domains)
+ return -EINVAL;
+
+ *power_thresh_low = THRESH_LOW(pi, domain_id);
+ *power_thresh_high = THRESH_HIGH(pi, domain_id);
+
+ return 0;
+}
+
+static int
+scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high)
+{
+ int ret = 0;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (domain_id >= pi->num_domains ||
+ power_thresh_low > power_thresh_high)
+ return -EINVAL;
+
+ /* Anything to do ? */
+ if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
+ THRESH_HIGH(pi, domain_id) == power_thresh_high)
+ return ret;
+
+ pi->states[domain_id].thresholds =
+ (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
+ FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
+
+ /* Update thresholds if notification already enabled */
+ if (pi->states[domain_id].meas_notif_enabled)
+ ret = scmi_powercap_notify(ph, domain_id,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ true);
+
+ return ret;
+}
+
+static int scmi_powercap_cap_enable_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable)
+{
+ int ret;
+ u32 power_cap;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
+ return -EINVAL;
+
+ if (enable == pi->states[domain_id].enabled)
+ return 0;
+
+ if (enable) {
+ /* Cannot enable with a zero powercap. */
+ if (!pi->states[domain_id].last_pcap)
+ return -EINVAL;
+
+ ret = __scmi_powercap_cap_set(ph, pi, domain_id,
+ pi->states[domain_id].last_pcap,
+ true);
+ } else {
+ ret = __scmi_powercap_cap_set(ph, pi, domain_id, 0, true);
+ }
+
+ if (ret)
+ return ret;
+
+ /*
+ * Update our internal state to reflect final platform state: the SCMI
+ * server could have ignored a disable request and kept enforcing some
+ * powercap limit requested by other agents.
+ */
+ ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
+ if (!ret)
+ pi->states[domain_id].enabled = !!power_cap;
+
+ return ret;
+}
+
+static int scmi_powercap_cap_enable_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool *enable)
+{
+ int ret;
+ u32 power_cap;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ *enable = true;
+ if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
+ return 0;
+
+ /*
+	 * Always report the real platform state; the platform could have
+	 * ignored a previous disable request. Default to true on any error.
+ */
+ ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
+ if (!ret)
+ *enable = !!power_cap;
+
+ /* Update internal state with current real platform state */
+ pi->states[domain_id].enabled = *enable;
+
+ return 0;
+}
+
+static const struct scmi_powercap_proto_ops powercap_proto_ops = {
+ .num_domains_get = scmi_powercap_num_domains_get,
+ .info_get = scmi_powercap_dom_info_get,
+ .cap_get = scmi_powercap_cap_get,
+ .cap_set = scmi_powercap_cap_set,
+ .cap_enable_set = scmi_powercap_cap_enable_set,
+ .cap_enable_get = scmi_powercap_cap_enable_get,
+ .pai_get = scmi_powercap_pai_get,
+ .pai_set = scmi_powercap_pai_set,
+ .measurements_get = scmi_powercap_measurements_get,
+ .measurements_threshold_set = scmi_powercap_measurements_threshold_set,
+ .measurements_threshold_get = scmi_powercap_measurements_threshold_get,
+};
+
+static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_SET, 4, domain,
+ &fc[POWERCAP_FC_CAP].set_addr,
+ &fc[POWERCAP_FC_CAP].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_GET, 4, domain,
+ &fc[POWERCAP_FC_CAP].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_SET, 4, domain,
+ &fc[POWERCAP_FC_PAI].set_addr,
+ &fc[POWERCAP_FC_PAI].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_GET, 4, domain,
+ &fc[POWERCAP_FC_PAI].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ switch (message_id) {
+ case POWERCAP_CAP_NOTIFY:
+ {
+ struct scmi_msg_powercap_notify_cap *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ break;
+ }
+ case POWERCAP_MEASUREMENTS_NOTIFY:
+ {
+ u32 low, high;
+ struct scmi_msg_powercap_notify_thresh *notify;
+
+ /*
+		 * Note that we have to pick the most recently configured
+		 * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
+		 * enable request, and we fail, complaining, if no thresholds
+		 * were ever set, since that indicates the API has been used
+		 * incorrectly.
+ */
+ ret = scmi_powercap_measurements_threshold_get(ph, domain,
+ &low, &high);
+ if (ret)
+ return ret;
+
+ if (enable && !low && !high) {
+ dev_err(ph->dev,
+ "Invalid Measurements Notify thresholds: %u/%u\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ notify->power_thresh_low = cpu_to_le32(low);
+ notify->power_thresh_high = cpu_to_le32(high);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+ else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
+ /*
+ * On success save the current notification enabled state, so
+ * as to be able to properly update the notification thresholds
+ * when they are modified on a domain for which measurement
+		 * notifications are currently enabled.
+ *
+ * This is needed because the SCMI Notification core machinery
+ * and API does not support passing per-notification custom
+ * arguments at callback registration time.
+ *
+ * Note that this can be done here with a simple flag since the
+ * SCMI core Notifications code takes care of keeping proper
+ * per-domain enables refcounting, so that this helper function
+ * will be called only once (for enables) when the first user
+ * registers a callback on this domain and once more (disable)
+ * when the last user de-registers its callback.
+ */
+ pi->states[src_id].meas_notif_enabled = enable;
+
+ return ret;
+}
+
+static void *
+scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_POWERCAP_CAP_CHANGED:
+ {
+ const struct scmi_powercap_cap_changed_notify_payld *p = payld;
+ struct scmi_powercap_cap_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_cap = le32_to_cpu(p->power_cap);
+ r->pai = le32_to_cpu(p->pai);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
+ {
+ const struct scmi_powercap_meas_changed_notify_payld *p = payld;
+ struct scmi_powercap_meas_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power = le32_to_cpu(p->power);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int
+scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event powercap_events[] = {
+ {
+ .id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_cap_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_cap_changed_report),
+ },
+ {
+ .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_meas_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_meas_changed_report),
+ },
+};
+
+static const struct scmi_event_ops powercap_event_ops = {
+ .get_num_sources = scmi_powercap_get_num_sources,
+ .set_notify_enabled = scmi_powercap_set_notify_enabled,
+ .fill_custom_report = scmi_powercap_fill_custom_report,
+};
+
+static const struct scmi_protocol_events powercap_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &powercap_event_ops,
+ .evts = powercap_events,
+ .num_events = ARRAY_SIZE(powercap_events),
+};
+
+static int
+scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct powercap_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Powercap Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_powercap_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->powercaps),
+ GFP_KERNEL);
+ if (!pinfo->powercaps)
+ return -ENOMEM;
+
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
+ /*
+ * Note that any failure in retrieving any domain attribute leads to
+ * the whole Powercap protocol initialization failure: this way the
+ * reported Powercap domains are all assured, when accessed, to be well
+ * formed and correlated by sane parent-child relationship (if any).
+ */
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
+ if (ret)
+ return ret;
+
+ if (pinfo->powercaps[domain].fastchannels)
+ scmi_powercap_domain_init_fc(ph, domain,
+ &pinfo->powercaps[domain].fc_info);
+
+ /* Grab initial state when disable is supported. */
+ if (PROTOCOL_REV_MAJOR(version) >= 0x2) {
+ ret = __scmi_powercap_cap_get(ph,
+ &pinfo->powercaps[domain],
+ &pinfo->states[domain].last_pcap);
+ if (ret)
+ return ret;
+
+ pinfo->states[domain].enabled =
+ !!pinfo->states[domain].last_pcap;
+ }
+ }
+
+ pinfo->version = version;
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_powercap = {
+ .id = SCMI_PROTOCOL_POWERCAP,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_powercap_protocol_init,
+ .ops = &powercap_proto_ops,
+ .events = &powercap_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
new file mode 100644
index 0000000000..78e1a01eb6
--- /dev/null
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * protocols common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_PROTOCOLS_H
+#define _SCMI_PROTOCOLS_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
+#define PROTOCOL_REV_MINOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
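+/* Illustrative example: a version word of 0x00020000 decodes as major 2, minor 0 */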
+
+enum scmi_common_cmd {
+ PROTOCOL_VERSION = 0x0,
+ PROTOCOL_ATTRIBUTES = 0x1,
+ PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+ __le16 minor_version;
+ __le16 major_version;
+};
+
+/**
+ * struct scmi_msg - Message(Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the Buffer
+ */
+struct scmi_msg {
+ void *buf;
+ size_t len;
+};
+
+/**
+ * struct scmi_msg_hdr - Message(Tx/Rx) header
+ *
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ * completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+ u8 id;
+ u8 protocol_id;
+ u8 type;
+ u16 seq;
+ u32 status;
+ bool poll_completion;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @transfer_id: Unique ID for debug & profiling purpose
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ * message. If request-ACK protocol is used, we can reuse the same
+ * buffer for the rx path as we use for the tx path.
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to @pending_xfers hashtable
+ * @node: An hlist_node reference used to store this xfer, alternatively, on
+ * the free list @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ * This is meant to protect against the possibility that, when a command
+ * transaction times out concurrently with the reception of a valid
+ * response message, the xfer could be finally put on the TX path, and
+ * so vanish, while on the RX path scmi_rx_callback() is still
+ * processing it: in such a case this refcounting will ensure that, even
+ * though the timed-out transaction will anyway cause the command
+ * request to be reported as failed by time-out, the underlying xfer
+ *	  cannot be discarded and possibly reused until the last user on
+ * the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ * valid being:
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ * - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ * (Missing synchronous response is assumed OK and ignored)
+ * @flags: Optional flags associated to this xfer.
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
+ */
+struct scmi_xfer {
+ int transfer_id;
+ struct scmi_msg_hdr hdr;
+ struct scmi_msg tx;
+ struct scmi_msg rx;
+ struct completion done;
+ struct completion *async_done;
+ bool pending;
+ struct hlist_node node;
+ refcount_t users;
+#define SCMI_XFER_FREE 0
+#define SCMI_XFER_BUSY 1
+ atomic_t busy;
+#define SCMI_XFER_SENT_OK 0
+#define SCMI_XFER_RESP_OK 1
+#define SCMI_XFER_DRESP_OK 2
+ int state;
+#define SCMI_XFER_FLAG_IS_RAW BIT(0)
+#define SCMI_XFER_IS_RAW(x) ((x)->flags & SCMI_XFER_FLAG_IS_RAW)
+#define SCMI_XFER_FLAG_CHAN_SET BIT(1)
+#define SCMI_XFER_IS_CHAN_SET(x) \
+ ((x)->flags & SCMI_XFER_FLAG_CHAN_SET)
+ int flags;
+ /* A lock to protect state and busy fields */
+ spinlock_t lock;
+ void *priv;
+};
+
+struct scmi_xfer_ops;
+struct scmi_proto_helpers_ops;
+
+/**
+ * struct scmi_protocol_handle - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ * can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against a specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ * that it can access the core xfer operations to build and generate SCMI
+ * messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ * this protocol through its own protocol operations.
+ * In this case this handle will be returned as an opaque object together
+ * with the related protocol operations when the SCMI driver tries to access
+ * the protocol.
+ */
+struct scmi_protocol_handle {
+ struct device *dev;
+ const struct scmi_xfer_ops *xops;
+ const struct scmi_proto_helpers_ops *hops;
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_iterator_state - Iterator current state descriptor
+ * @desc_index: Starting index for the current mulit-part request.
+ * @num_returned: Number of returned items in the last multi-part reply.
+ * @num_remaining: Number of remaining items in the multi-part message.
+ * @max_resources: Maximum acceptable number of items, configured by the caller
+ * depending on the underlying resources that it is querying.
+ * @loop_idx: The iterator loop index in the current multi-part reply.
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
+ * the user of the iterator to verify a reply size.
+ * @priv: Optional pointer to some additional state-related private data set up
+ * by the caller during the iterations.
+ */
+struct scmi_iterator_state {
+ unsigned int desc_index;
+ unsigned int num_returned;
+ unsigned int num_remaining;
+ unsigned int max_resources;
+ unsigned int loop_idx;
+ size_t rx_len;
+ void *priv;
+};
+
+/**
+ * struct scmi_iterator_ops - Custom iterator operations
+ * @prepare_message: An operation to provide the custom logic to fill in the
+ * SCMI command request pointed by @message. @desc_index is
+ * a reference to the next index to use in the multi-part
+ * request.
+ * @update_state: An operation to provide the custom logic to update the
+ * iterator state from the actual message response.
+ * @process_response: An operation to provide the custom logic needed to process
+ * each chunk of the multi-part message.
+ */
+struct scmi_iterator_ops {
+ void (*prepare_message)(void *message, unsigned int desc_index,
+ const void *priv);
+ int (*update_state)(struct scmi_iterator_state *st,
+ const void *response, void *priv);
+ int (*process_response)(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv);
+};
+
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
+/**
+ * struct scmi_proto_helpers_ops - References to common protocol helpers
+ * @extended_name_get: A common helper function to retrieve extended naming
+ * for the specified resource using the specified command.
+ * Result is returned as a NULL terminated string in the
+ * pre-allocated area pointed to by @name with maximum
+ * capacity of @len bytes.
+ * @iter_response_init: A common helper to initialize a generic iterator to
+ * parse multi-message responses: when run the iterator
+ * will take care to send the initial command request as
+ * specified by @msg_id and @tx_size and then to parse the
+ * multi-part responses using the custom operations
+ * provided in @ops.
+ * @iter_response_run: A common helper to trigger the run of a previously
+ * initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
+ */
+struct scmi_proto_helpers_ops {
+ int (*extended_name_get)(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 res_id, char *name, size_t len);
+ void *(*iter_response_init)(const struct scmi_protocol_handle *ph,
+ struct scmi_iterator_ops *ops,
+ unsigned int max_resources, u8 msg_id,
+ size_t tx_size, void *priv);
+ int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
+};
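+
+/*
+ * Illustrative sketch (hypothetical protocol-side code; all "my_*" names are
+ * made up and error handling is omitted) of how the iterator helpers above
+ * are intended to be used, based only on the signatures in this header:
+ *
+ *	static struct scmi_iterator_ops my_iter_ops = {
+ *		.prepare_message = my_prepare_message,
+ *		.update_state = my_update_state,
+ *		.process_response = my_process_response,
+ *	};
+ *
+ *	iter = ph->hops->iter_response_init(ph, &my_iter_ops, max_resources,
+ *					    MY_LIST_MSG_ID,
+ *					    sizeof(struct my_req), &my_priv);
+ *	ret = ph->hops->iter_response_run(iter);
+ */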
+
+/**
+ * struct scmi_xfer_ops - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as their first
+ * parameter; they then internally use it to infer the underlying protocol
+ * number: this way it is not possible for a protocol implementation to forge
+ * messages for another protocol.
+ */
+struct scmi_xfer_ops {
+ int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+ int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+ size_t tx_size, size_t rx_size,
+ struct scmi_xfer **p);
+ void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+ void (*xfer_put)(const struct scmi_protocol_handle *ph,
+ struct scmi_xfer *xfer);
+};
+
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ * exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+ const u8 id;
+ struct module *owner;
+ const scmi_prot_init_ph_fn_t instance_init;
+ const scmi_prot_init_ph_fn_t instance_deinit;
+ const void *ops;
+ const struct scmi_protocol_events *events;
+};
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
+static const struct scmi_protocol *__this_proto = &(proto); \
+ \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register(__this_proto); \
+} \
+ \
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister(__this_proto); \
+}
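+
+/*
+ * As an illustration, powercap.c above uses
+ * DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap), which
+ * expands to scmi_powercap_register()/scmi_powercap_unregister(), matching
+ * the prototypes declared via DECLARE_SCMI_REGISTER_UNREGISTER() below.
+ */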
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
+ int __init scmi_##func##_register(void); \
+ void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
+
+#endif /* _SCMI_PROTOCOLS_H */
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
new file mode 100644
index 0000000000..3505735185
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -0,0 +1,1450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Raw mode support
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * When enabled the SCMI Raw mode support exposes a userspace API which allows
+ * to send and receive SCMI commands, replies and notifications from a user
+ * application through injection and snooping of bare SCMI messages in binary
+ * little-endian format.
+ *
+ * Such injected SCMI transactions will then be routed through the SCMI core
+ * stack towards the SCMI backend server using whatever SCMI transport is
+ * currently configured on the system under test.
+ *
+ * It is meant to help in running any sort of SCMI backend server testing, no
+ * matter where the server is placed, as long as it is normally reachable via
+ * the transport configured on the system.
+ *
+ * It is activated by a Kernel configuration option since it is NOT meant to
+ * be used in production but only during development and in CI deployments.
+ *
+ * In order to avoid possible interferences between the SCMI Raw transactions
+ * originated from a test-suite and the normal operations of the SCMI drivers,
+ * when Raw mode is enabled, by default, all the regular SCMI drivers are
+ * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
+ * latter case the regular SCMI stack drivers will be loaded as usual and it is
+ * up to the user of this interface to take care of manually inhibiting the
+ * regular SCMI drivers in order to avoid interferences during the test runs.
+ *
+ * The exposed API is as follows.
+ *
+ * All SCMI Raw entries are rooted under a common /raw debugfs top directory,
+ * which in turn is rooted under the corresponding underlying SCMI instance.
+ *
+ * /sys/kernel/debug/scmi/
+ * `-- 0
+ * |-- atomic_threshold_us
+ * |-- instance_name
+ * |-- raw
+ * | |-- channels
+ * | | |-- 0x10
+ * | | | |-- message
+ * | | | `-- message_async
+ * | | `-- 0x13
+ * | | |-- message
+ * | | `-- message_async
+ * | |-- errors
+ * | |-- message
+ * | |-- message_async
+ * | |-- notification
+ * | `-- reset
+ * `-- transport
+ * |-- is_atomic
+ * |-- max_msg_size
+ * |-- max_rx_timeout_ms
+ * |-- rx_max_msg
+ * |-- tx_max_msg
+ * `-- type
+ *
+ * where:
+ *
+ * - errors: used to read back timed-out and unexpected replies
+ * - message*: used to send sync/async commands and read back immediate and
+ *             delayed responses (if any)
+ * - notification: used to read any notification being emitted by the system
+ * (if previously enabled by the user app)
+ * - reset: used to flush the queues of messages (of any kind) still pending
+ * to be read; this is useful at test-suite start/stop to get
+ * rid of any unread messages from the previous run.
+ *
+ * with the per-channel entries rooted at /channels being present only on a
+ * system where multiple transport channels have been configured.
+ *
+ * Such per-channel entries can be used to explicitly choose a specific channel
+ * for SCMI bare message injection, in contrast with the general entries above
+ * where, instead, the selection of the proper channel to use is automatically
+ * performed based on the protocol embedded in the injected message and on how the
+ * transport is configured on the system.
+ *
+ * Note that other common general entries are available under transport/ to let
+ * the user applications properly set their expectations in terms of
+ * timeouts and message characteristics.
+ *
+ * Each write to the message* entries causes one command request to be built
+ * and sent while the replies or delayed response are read back from those same
+ * entries one message at a time (receiving an EOF at each message boundary).
+ *
+ * The user application running the test is in charge of handling timeouts
+ * on replies and properly choosing SCMI sequence numbers for the outgoing
+ * requests (using the same sequence number is supported but discouraged).
+ *
+ * Injection of multiple in-flight requests is supported as long as the user
+ * application uses properly distinct sequence numbers for concurrent requests
+ * and takes care to properly manage all the related issues about concurrency
+ * and command/reply pairing. Keep in mind that, anyway, the real level of
+ * parallelism attainable in such a scenario is dependent on the characteristics
+ * of the underlying transport being used.
+ *
+ * Since the SCMI core regular stack is partially used to deliver and collect
+ * the messages, late replies arriving after timeouts and any other sort of
+ * unexpected message can be identified by the SCMI core as usual and they will
+ * be reported as messages under "errors" for later analysis.
+ */
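+
+/*
+ * Minimal, illustrative user-space sketch of the injection flow described
+ * above (the debugfs mount point is an assumption; cmd_buf must hold a full
+ * little-endian SCMI message, header included, packed as per the SCMI
+ * specification):
+ *
+ *	int fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
+ *
+ *	write(fd, cmd_buf, cmd_len);		// inject one full command
+ *	poll(&(struct pollfd){ .fd = fd, .events = POLLIN }, 1, timeout_ms);
+ *	n = read(fd, reply_buf, sizeof(reply_buf));	// one reply per read-to-EOF
+ */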
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "common.h"
+
+#include "raw_mode.h"
+
+#include <trace/events/scmi.h>
+
+#define SCMI_XFER_RAW_MAX_RETRIES 10
+
+/**
+ * struct scmi_raw_queue - Generic Raw queue descriptor
+ *
+ * @free_bufs: A freelist listhead used to keep unused raw buffers
+ * @free_bufs_lock: Spinlock used to protect access to @free_bufs
+ * @msg_q: A listhead to a queue of snooped messages waiting to be read out
+ * @msg_q_lock: Spinlock used to protect access to @msg_q
+ * @wq: A waitqueue used to wait and poll on related @msg_q
+ */
+struct scmi_raw_queue {
+ struct list_head free_bufs;
+ /* Protect free_bufs[] lists */
+ spinlock_t free_bufs_lock;
+ struct list_head msg_q;
+ /* Protect msg_q[] lists */
+ spinlock_t msg_q_lock;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
+ *
+ * @id: Sequential Raw instance ID.
+ * @handle: Pointer to SCMI entity handle to use
+ * @desc: Pointer to the transport descriptor to use
+ * @tx_max_msg: Maximum number of concurrent TX in-flight messages
+ * @q: An array of Raw queue descriptors
+ * @chans_q: An XArray mapping optional additional per-channel queues
+ * @free_waiters: Head of freelist for unused waiters
+ * @free_mtx: A mutex to protect the waiters freelist
+ * @active_waiters: Head of list for currently active and used waiters
+ * @active_mtx: A mutex to protect the active waiters list
+ * @waiters_work: A work descriptor to be used with the workqueue machinery
+ * @wait_wq: A workqueue reference to the created workqueue
+ * @dentry: Top debugfs root dentry for SCMI Raw
+ * @gid: A group ID used for devres accounting
+ *
+ * Note that this descriptor is passed back to the core after SCMI Raw is
+ * initialized, as an opaque handle to be used by subsequent SCMI Raw call hooks.
+ *
+ */
+struct scmi_raw_mode_info {
+ unsigned int id;
+ const struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ int tx_max_msg;
+ struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
+ struct xarray chans_q;
+ struct list_head free_waiters;
+ /* Protect free_waiters list */
+ struct mutex free_mtx;
+ struct list_head active_waiters;
+ /* Protect active_waiters list */
+ struct mutex active_mtx;
+ struct work_struct waiters_work;
+ struct workqueue_struct *wait_wq;
+ struct dentry *dentry;
+ void *gid;
+};
+
+/**
+ * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
+ *
+ * @start_jiffies: The timestamp in jiffies of when this structure was queued.
+ * @cinfo: A reference to the channel to use for this transaction
+ * @xfer: A reference to the xfer to be waited for
+ * @async_response: A completion to be, optionally, used for async waits: it
+ *                  will be set up by @scmi_do_xfer_raw_start, if needed, to be
+ * pointed at by xfer->async_done.
+ * @node: A list node.
+ */
+struct scmi_xfer_raw_waiter {
+ unsigned long start_jiffies;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer *xfer;
+ struct completion async_response;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_raw_buffer - Structure to hold a full SCMI message
+ *
+ * @max_len: The maximum allowed message size (header included) that can be
+ * stored into @msg
+ * @msg: A message buffer used to collect a full message grabbed from an xfer.
+ * @node: A list node.
+ */
+struct scmi_raw_buffer {
+ size_t max_len;
+ struct scmi_msg msg;
+ struct list_head node;
+};
+
+/**
+ * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
+ * layer
+ *
+ * @chan_id: The preferred channel to use: if zero the channel is automatically
+ * selected based on protocol.
+ * @raw: A reference to the Raw instance.
+ * @tx: A message buffer used to collect TX message on write.
+ * @tx_size: The effective size of the TX message.
+ * @tx_req_size: The final expected size of the complete TX message.
+ * @rx: A message buffer to collect RX message on read.
+ * @rx_size: The effective size of the RX message.
+ */
+struct scmi_dbg_raw_data {
+ u8 chan_id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_msg tx;
+ size_t tx_size;
+ size_t tx_req_size;
+ struct scmi_msg rx;
+ size_t rx_size;
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
+ unsigned int chan_id)
+{
+ if (!chan_id)
+ return raw->q[idx];
+
+ return xa_load(&raw->chans_q, chan_id);
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb = NULL;
+ struct list_head *head = &q->free_bufs;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ if (!list_empty(head)) {
+ rb = list_first_entry(head, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ spin_lock_irqsave(&q->free_bufs_lock, flags);
+ list_add_tail(&rb->node, &q->free_bufs);
+ spin_unlock_irqrestore(&q->free_bufs_lock, flags);
+}
+
+static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
+ struct scmi_raw_buffer *rb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ list_add_tail(&rb->node, &q->msg_q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ wake_up_interruptible(&q->wq);
+}
+
+static struct scmi_raw_buffer*
+scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb = NULL;
+
+ if (!list_empty(&q->msg_q)) {
+ rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
+ list_del_init(&rb->node);
+ }
+
+ return rb;
+}
+
+static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
+{
+ struct scmi_raw_buffer *rb;
+
+ do {
+ rb = scmi_raw_buffer_dequeue(q);
+ if (rb)
+ scmi_raw_buffer_put(q, rb);
+ } while (rb);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
+ struct scmi_chan_info *cinfo, bool async)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->free_mtx);
+ if (!list_empty(&raw->free_waiters)) {
+ rw = list_first_entry(&raw->free_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+
+ if (async) {
+ reinit_completion(&rw->async_response);
+ xfer->async_done = &rw->async_response;
+ }
+
+ rw->cinfo = cinfo;
+ rw->xfer = xfer;
+ }
+ mutex_unlock(&raw->free_mtx);
+
+ return rw;
+}
+
+static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ if (rw->xfer) {
+ rw->xfer->async_done = NULL;
+ rw->xfer = NULL;
+ }
+
+ mutex_lock(&raw->free_mtx);
+ list_add_tail(&rw->node, &raw->free_waiters);
+ mutex_unlock(&raw->free_mtx);
+}
+
+static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer_raw_waiter *rw)
+{
+ /* A timestamp for the deferred worker to know how much this has aged */
+ rw->start_jiffies = jiffies;
+
+ trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
+ rw->xfer->hdr.protocol_id,
+ rw->xfer->hdr.seq,
+ raw->desc->max_rx_timeout_ms,
+ rw->xfer->hdr.poll_completion);
+
+ mutex_lock(&raw->active_mtx);
+ list_add_tail(&rw->node, &raw->active_waiters);
+ mutex_unlock(&raw->active_mtx);
+
+ /* kick waiter work */
+ queue_work(raw->wait_wq, &raw->waiters_work);
+}
+
+static struct scmi_xfer_raw_waiter *
+scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
+{
+ struct scmi_xfer_raw_waiter *rw = NULL;
+
+ mutex_lock(&raw->active_mtx);
+ if (!list_empty(&raw->active_waiters)) {
+ rw = list_first_entry(&raw->active_waiters,
+ struct scmi_xfer_raw_waiter, node);
+ list_del_init(&rw->node);
+ }
+ mutex_unlock(&raw->active_mtx);
+
+ return rw;
+}
+
+/**
+ * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
+ *
+ * @work: A reference to the work.
+ *
+ * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
+ * cannot wait to receive its response (if any) in the context of the injection
+ * routines so as not to leave the userspace write syscall, which delivered the
+ * SCMI message to send, pending till eventually a reply is received.
+ * Userspace should and will poll/wait instead on the read syscalls which will
+ * be in charge of reading a received reply (if any).
+ *
+ * Even though reply messages are collected and reported into the SCMI Raw layer
+ * on the RX path, nonetheless we have to properly wait for their completion as
+ * usual (and async_completion too if needed) in order to properly release the
+ * xfer structure at the end: to do this out of the context of the write/send
+ * these waiting jobs are delegated to this deferred worker.
+ *
+ * Any sent xfer, to be waited for, is timestamped and queued for later
+ * consumption by this worker: queue aging is accounted for while choosing a
+ * timeout for the completion, BUT we do not really care here if we end up
+ * accidentally waiting for a bit too long.
+ */
+static void scmi_xfer_raw_worker(struct work_struct *work)
+{
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+ unsigned long max_tmo;
+
+ raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
+ dev = raw->handle->dev;
+ max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
+
+ do {
+ int ret = 0;
+ unsigned int timeout_ms;
+ unsigned long aging;
+ struct scmi_xfer *xfer;
+ struct scmi_xfer_raw_waiter *rw;
+ struct scmi_chan_info *cinfo;
+
+ rw = scmi_xfer_raw_waiter_dequeue(raw);
+ if (!rw)
+ return;
+
+ cinfo = rw->cinfo;
+ xfer = rw->xfer;
+ /*
+ * Waiters are queued by wait-deadline at the end, so some of
+ * them could have been already expired when processed, BUT we
+ * have to check the completion status anyway just in case a
+ * virtually expired (aged) transaction was indeed completed
+ * fine and we'll have to wait for the asynchronous part (if
+ * any): for this reason a 1 ms timeout is used for already
+ * expired/aged xfers.
+ */
+ aging = jiffies - rw->start_jiffies;
+ timeout_ms = max_tmo > aging ?
+ jiffies_to_msecs(max_tmo - aging) : 1;
+
+ ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
+ timeout_ms);
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (raw->desc->ops->mark_txdone)
+ raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ /* Wait also for an async delayed response if needed */
+ if (!ret && xfer->async_done) {
+ unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+
+ if (!wait_for_completion_timeout(xfer->async_done, tmo))
+ dev_err(dev,
+ "timed out in RAW delayed resp - HDR:%08X\n",
+ pack_scmi_header(&xfer->hdr));
+ }
+
+ /* Release waiter and xfer */
+ scmi_xfer_raw_put(raw->handle, xfer);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ } while (1);
+}
+
+static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
+{
+ int i;
+
+ dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
+
+ for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
+ scmi_raw_buffer_queue_flush(raw->q[i]);
+}
+
+/**
+ * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
+ * bare SCMI message.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ *       header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @p: A pointer to return the initialized Raw xfer.
+ *
+ * After an xfer is picked from the TX pool and filled in with the message
+ * content, the xfer is registered as pending with the core in the usual way
+ * using the original sequence number provided by the user with the message.
+ *
+ * Note that, in case the testing user application is NOT using distinct
+ * sequence numbers between successive SCMI messages, such registration could
+ * fail temporarily if the previous message, using the same sequence number,
+ * had still not been released; in such a case we just wait and retry.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
+ size_t len, struct scmi_xfer **p)
+{
+ u32 msg_hdr;
+ size_t tx_size;
+ struct scmi_xfer *xfer;
+ int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
+ struct device *dev = raw->handle->dev;
+
+ if (!buf || len < sizeof(u32))
+ return -EINVAL;
+
+ tx_size = len - sizeof(u32);
+ /* Ensure we have sane transfer sizes */
+ if (tx_size > raw->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_raw_get(raw->handle);
+ if (IS_ERR(xfer)) {
+ dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
+ return PTR_ERR(xfer);
+ }
+
+ /* Build xfer from the provided SCMI bare LE message */
+ msg_hdr = le32_to_cpu(*((__le32 *)buf));
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
+ /* Polling not supported */
+ xfer->hdr.poll_completion = false;
+ xfer->hdr.status = SCMI_SUCCESS;
+ xfer->tx.len = tx_size;
+ xfer->rx.len = raw->desc->max_msg_size;
+ /* Clear the whole TX buffer */
+ memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
+ if (xfer->tx.len)
+ memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
+ *p = xfer;
+
+ /*
+ * In flight registration can temporarily fail in case of Raw messages
+ * if the user injects messages without using monotonically increasing
+ * sequence numbers since, in Raw mode, the xfer (and the token) is
+ * finally released later by a deferred worker. Just retry for a while.
+ */
+ do {
+ ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
+ if (ret) {
+ dev_dbg(dev,
+ "...retrying[%d] inflight registration\n",
+ retry);
+ msleep(raw->desc->max_rx_timeout_ms /
+ SCMI_XFER_RAW_MAX_RETRIES);
+ }
+ } while (ret && --retry);
+
+ if (ret) {
+ dev_warn(dev,
+ "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
+ xfer->hdr.seq, msg_hdr);
+ scmi_xfer_raw_put(raw->handle, xfer);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
+ *
+ * @raw: A reference to the Raw instance.
+ * @xfer: The xfer to send
+ * @chan_id: The channel ID to use, if zero the channel is automatically
+ * selected based on the protocol used.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * This function sends a previously built raw xfer using an appropriate channel
+ * and queues the related waiting work.
+ *
+ * Note that we need to know explicitly if the required command is meant to be
+ * asynchronous in kind since we have to properly set up the waiter.
+ * (and deducing this from the payload is weak and does not scale given there is
+ * NOT a common header-flag stating if the command is asynchronous or not)
+ *
+ * Return: 0 on Success
+ */
+static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
+ struct scmi_xfer *xfer, u8 chan_id,
+ bool async)
+{
+ int ret;
+ struct scmi_chan_info *cinfo;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ if (!chan_id)
+ chan_id = xfer->hdr.protocol_id;
+ else
+ xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
+
+ cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
+ if (IS_ERR(cinfo))
+ return PTR_ERR(cinfo);
+
+ rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
+ if (!rw) {
+ dev_warn(dev, "RAW - Cannot get a free waiter !\n");
+ return -ENOMEM;
+ }
+
+ /* True ONLY if also supported by transport. */
+ if (is_polling_enabled(cinfo, raw->desc))
+ xfer->hdr.poll_completion = true;
+
+ reinit_completion(&xfer->done);
+ /* Make sure xfer state update is visible before sending */
+ smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ ret = raw->desc->ops->send_message(rw->cinfo, xfer);
+ if (ret) {
+ dev_err(dev, "Failed to send RAW message %d\n", ret);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ return ret;
+ }
+
+ trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
+ xfer->hdr.id, "cmnd", xfer->hdr.seq,
+ xfer->hdr.status,
+ xfer->tx.buf, xfer->tx.len);
+
+ scmi_xfer_raw_waiter_enqueue(raw, rw);
+
+ return ret;
+}
+
+/**
+ * scmi_raw_message_send - A helper to build and send an SCMI command using
+ * the provided SCMI bare message buffer
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer containing the whole SCMI message to send (including the
+ * header) in little-endian binary format.
+ * @len: Length of the message in @buf.
+ * @chan_id: The channel ID to use.
+ * @async: A flag stating if an asynchronous command is required.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, u8 chan_id, bool async)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+
+ ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
+ if (ret)
+ scmi_xfer_raw_put(raw->handle, xfer);
+
+ return ret;
+}
+
+static struct scmi_raw_buffer *
+scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
+{
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ while (list_empty(&q->msg_q)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ if (o_nonblock)
+ return ERR_PTR(-EAGAIN);
+
+ if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
+ return ERR_PTR(-ERESTARTSYS);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ }
+
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return rb;
+}
+
+/**
+ * scmi_raw_message_receive - A helper to dequeue and report the next
+ * available enqueued raw message payload that has been collected.
+ *
+ * @raw: A reference to the Raw instance.
+ * @buf: A buffer to get hold of the whole SCMI message received and represented
+ * in little-endian binary format.
+ * @len: Length of @buf.
+ * @size: The effective size of the message copied into @buf
+ * @idx: The index of the queue to pick the next queued message from.
+ * @chan_id: The channel ID to use.
+ * @o_nonblock: A flag to request a non-blocking message dequeue.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
+ void *buf, size_t len, size_t *size,
+ unsigned int idx, unsigned int chan_id,
+ bool o_nonblock)
+{
+ int ret = 0;
+ struct scmi_raw_buffer *rb;
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_select(raw, idx, chan_id);
+ if (!q)
+ return -ENODEV;
+
+ rb = scmi_raw_message_dequeue(q, o_nonblock);
+ if (IS_ERR(rb)) {
+ dev_dbg(raw->handle->dev, "RAW - No message available!\n");
+ return PTR_ERR(rb);
+ }
+
+ if (rb->msg.len <= len) {
+ memcpy(buf, rb->msg.buf, rb->msg.len);
+ *size = rb->msg.len;
+ } else {
+ ret = -ENOSPC;
+ }
+
+ scmi_raw_buffer_put(q, rb);
+
+ return ret;
+}
+
+/* SCMI Raw debugfs helpers */
+
+static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos,
+ unsigned int idx)
+{
+ ssize_t cnt;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (!rd->rx_size) {
+ int ret;
+
+ ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
+ &rd->rx_size, idx, rd->chan_id,
+ filp->f_flags & O_NONBLOCK);
+ if (ret) {
+ rd->rx_size = 0;
+ return ret;
+ }
+
+ /* Reset any previous filepos change, including writes */
+ *ppos = 0;
+ } else if (*ppos == rd->rx_size) {
+ /* Return EOF once all the message has been read-out */
+ rd->rx_size = 0;
+ return 0;
+ }
+
+ cnt = simple_read_from_buffer(buf, count, ppos,
+ rd->rx.buf, rd->rx_size);
+
+ return cnt;
+}
+
+static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos,
+ bool async)
+{
+ int ret;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ if (count > rd->tx.len - rd->tx_size)
+ return -ENOSPC;
+
+ /* On first write attempt @count carries the total full message size. */
+ if (!rd->tx_size)
+ rd->tx_req_size = count;
+
+ /*
+	 * Gather a full message, possibly across multiple interrupted writes,
+ * before sending it with a single RAW xfer.
+ */
+ if (rd->tx_size < rd->tx_req_size) {
+ ssize_t cnt;
+
+ cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
+ buf, count);
+ if (cnt < 0)
+ return cnt;
+
+ rd->tx_size += cnt;
+ if (cnt < count)
+ return cnt;
+ }
+
+ ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
+ rd->chan_id, async);
+
+ /* Reset ppos for next message ... */
+ rd->tx_size = 0;
+ *ppos = 0;
+
+ return ret ?: count;
+}
+
+static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
+ struct poll_table_struct *wait,
+ unsigned int idx)
+{
+ unsigned long flags;
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+ struct scmi_raw_queue *q;
+ __poll_t mask = 0;
+
+ q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
+ if (!q)
+ return mask;
+
+ poll_wait(filp, &q->wq, wait);
+
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ if (!list_empty(&q->msg_q))
+ mask = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ return mask;
+}
+
+static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_REPLY_QUEUE);
+}
+
+static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+}
+
+static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
+}
+
+static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
+{
+ u8 id;
+ struct scmi_raw_mode_info *raw;
+ struct scmi_dbg_raw_data *rd;
+ const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
+
+ if (!inode->i_private)
+ return -ENODEV;
+
+ raw = inode->i_private;
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
+ if (!rd->rx.buf) {
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
+ rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
+ if (!rd->tx.buf) {
+ kfree(rd->rx.buf);
+ kfree(rd);
+ return -ENOMEM;
+ }
+
+ /* Grab channel ID from debugfs entry naming if any */
+ if (!kstrtou8(id_str, 16, &id))
+ rd->chan_id = id;
+
+ rd->raw = raw;
+ filp->private_data = rd;
+
+ return 0;
+}
+
+static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ kfree(rd->rx.buf);
+ kfree(rd->tx.buf);
+ kfree(rd);
+
+ return 0;
+}
+
+static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct scmi_dbg_raw_data *rd = filp->private_data;
+
+ scmi_xfer_raw_reset(rd->raw);
+
+ return count;
+}
+
+static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .write = scmi_dbg_raw_mode_reset_write,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations scmi_dbg_raw_mode_message_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_NOTIF_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_notif_read,
+ .poll = scmi_test_dbg_raw_mode_notif_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
+ SCMI_RAW_ERRS_QUEUE);
+}
+
+static __poll_t
+scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_test_dbg_raw_mode_errors_read,
+ .poll = scmi_test_dbg_raw_mode_errors_poll,
+ .owner = THIS_MODULE,
+};
+
+static struct scmi_raw_queue *
+scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_raw_buffer *rb;
+ struct device *dev = raw->handle->dev;
+ struct scmi_raw_queue *q;
+
+ q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
+ rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
+ if (!rb)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&q->free_bufs_lock);
+ INIT_LIST_HEAD(&q->free_bufs);
+ for (i = 0; i < raw->tx_max_msg; i++, rb++) {
+ rb->max_len = raw->desc->max_msg_size + sizeof(u32);
+ rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
+ if (!rb->msg.buf)
+ return ERR_PTR(-ENOMEM);
+ scmi_raw_buffer_put(q, rb);
+ }
+
+ spin_lock_init(&q->msg_q_lock);
+ INIT_LIST_HEAD(&q->msg_q);
+ init_waitqueue_head(&q->wq);
+
+ return q;
+}
+
+static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
+{
+ int i;
+ struct scmi_xfer_raw_waiter *rw;
+ struct device *dev = raw->handle->dev;
+
+ rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
+ if (!rw)
+ return -ENOMEM;
+
+ raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
+ WQ_UNBOUND | WQ_FREEZABLE |
+ WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
+ if (!raw->wait_wq)
+ return -ENOMEM;
+
+ mutex_init(&raw->free_mtx);
+ INIT_LIST_HEAD(&raw->free_waiters);
+ mutex_init(&raw->active_mtx);
+ INIT_LIST_HEAD(&raw->active_waiters);
+
+ for (i = 0; i < raw->tx_max_msg; i++, rw++) {
+ init_completion(&rw->async_response);
+ scmi_xfer_raw_waiter_put(raw, rw);
+ }
+ INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
+
+ return 0;
+}
+
+static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
+ u8 *channels, int num_chans)
+{
+ int ret, idx;
+ void *gid;
+ struct device *dev = raw->handle->dev;
+
+ gid = devres_open_group(dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
+ raw->q[idx] = scmi_raw_queue_init(raw);
+ if (IS_ERR(raw->q[idx])) {
+ ret = PTR_ERR(raw->q[idx]);
+ goto err;
+ }
+ }
+
+ xa_init(&raw->chans_q);
+ if (num_chans > 1) {
+ int i;
+
+ for (i = 0; i < num_chans; i++) {
+ struct scmi_raw_queue *q;
+
+ q = scmi_raw_queue_init(raw);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto err_xa;
+ }
+
+ ret = xa_insert(&raw->chans_q, channels[i], q,
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(dev,
+ "Fail to allocate Raw queue 0x%02X\n",
+ channels[i]);
+ goto err_xa;
+ }
+ }
+ }
+
+ ret = scmi_xfer_raw_worker_init(raw);
+ if (ret)
+ goto err_xa;
+
+ devres_close_group(dev, gid);
+ raw->gid = gid;
+
+ return 0;
+
+err_xa:
+ xa_destroy(&raw->chans_q);
+err:
+ devres_release_group(dev, gid);
+ return ret;
+}
+
+/**
+ * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @top_dentry: A reference to the top Raw debugfs dentry
+ * @instance_id: The ID of the underlying SCMI platform instance represented by
+ * this Raw instance
+ * @channels: The list of the existing channels
+ * @num_chans: The number of entries in @channels
+ * @desc: Reference to the transport operations
+ * @tx_max_msg: Max number of in-flight messages allowed by the transport
+ *
+ * This function prepares the SCMI Raw stack and creates the debugfs API.
+ *
+ * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
+ */
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg)
+{
+ int ret;
+ struct scmi_raw_mode_info *raw;
+ struct device *dev;
+
+ if (!handle || !desc)
+ return ERR_PTR(-EINVAL);
+
+ dev = handle->dev;
+ raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
+ if (!raw)
+ return ERR_PTR(-ENOMEM);
+
+ raw->handle = handle;
+ raw->desc = desc;
+ raw->tx_max_msg = tx_max_msg;
+ raw->id = instance_id;
+
+ ret = scmi_raw_mode_setup(raw, channels, num_chans);
+ if (ret) {
+ devm_kfree(dev, raw);
+ return ERR_PTR(ret);
+ }
+
+ raw->dentry = debugfs_create_dir("raw", top_dentry);
+
+ debugfs_create_file("reset", 0200, raw->dentry, raw,
+ &scmi_dbg_raw_mode_reset_fops);
+
+ debugfs_create_file("message", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file("notification", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_notification_fops);
+
+ debugfs_create_file("errors", 0400, raw->dentry, raw,
+ &scmi_dbg_raw_mode_errors_fops);
+
+ /*
+ * Expose per-channel entries if multiple channels available.
+ * Just ignore errors while setting up these interfaces since we
+ * have anyway already a working core Raw support.
+ */
+ if (num_chans > 1) {
+ int i;
+ struct dentry *top_chans;
+
+ top_chans = debugfs_create_dir("channels", raw->dentry);
+
+ for (i = 0; i < num_chans; i++) {
+ char cdir[8];
+ struct dentry *chd;
+
+ snprintf(cdir, 8, "0x%02X", channels[i]);
+ chd = debugfs_create_dir(cdir, top_chans);
+
+ debugfs_create_file("message", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_fops);
+
+ debugfs_create_file("message_async", 0600, chd, raw,
+ &scmi_dbg_raw_mode_message_async_fops);
+ }
+ }
+
+ dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
+
+ return raw;
+}
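
As a usage illustration only, the sketch below exercises the debugfs entries created above from userspace. It is a hedged example, not part of the patch: it assumes debugfs is mounted at /sys/kernel/debug, that @top_dentry ends up under a per-instance directory such as scmi/0 (the exact parent path is decided by the caller, not here), and that an injected command is laid out as a little-endian 32-bit SCMI header followed by the payload, mirroring the reply layout produced by scmi_xfer_raw_collect() further below.

	/* Hypothetical userspace snippet; the path and message layout are assumptions. */
	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/scmi/0/raw/message"; /* assumed path */
		uint8_t cmd[128], rsp[128];
		uint32_t hdr = 0;		/* fill with a valid packed SCMI header */
		struct pollfd pfd;
		ssize_t n;
		int fd;

		fd = open(path, O_RDWR);
		if (fd < 0)
			return 1;

		memcpy(cmd, &hdr, sizeof(hdr));	/* header first, payload (if any) after */
		if (write(fd, cmd, sizeof(hdr)) < 0) {
			close(fd);
			return 1;
		}

		pfd.fd = fd;
		pfd.events = POLLIN;
		if (poll(&pfd, 1, 1000) > 0) {	/* wait up to 1s for the reply */
			n = read(fd, rsp, sizeof(rsp));
			if (n > 0)
				printf("got %zd bytes back\n", n);
		}

		close(fd);
		return 0;
	}
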
+
+/**
+ * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
+ *
+ * @r: An opaque handle to an initialized SCMI Raw instance
+ */
+void scmi_raw_mode_cleanup(void *r)
+{
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ debugfs_remove_recursive(raw->dentry);
+
+ cancel_work_sync(&raw->waiters_work);
+ destroy_workqueue(raw->wait_wq);
+ xa_destroy(&raw->chans_q);
+}
+
+static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
+ struct scmi_xfer *xfer)
+{
+ __le32 *m;
+ size_t msg_size;
+
+ if (!xfer || !msg || !msg_len)
+ return -EINVAL;
+
+ /* Account for hdr ...*/
+ msg_size = xfer->rx.len + sizeof(u32);
+ /* ... and status if needed */
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ msg_size += sizeof(u32);
+
+ if (msg_size > *msg_len)
+ return -ENOSPC;
+
+ m = msg;
+ *m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+ if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
+ *++m = cpu_to_le32(xfer->hdr.status);
+
+ memcpy(++m, xfer->rx.buf, xfer->rx.len);
+
+ *msg_len = msg_size;
+
+ return 0;
+}
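
For clarity, the layout collected above is: a 32-bit little-endian packed header, then a 32-bit status word for anything that is not a notification, then the raw payload. Below is a minimal, self-contained decoding sketch for a buffer with this layout (a hypothetical helper, not part of the patch; it assumes a little-endian host for brevity).

	#include <errno.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct raw_msg_view {
		uint32_t hdr;		/* packed SCMI header */
		int32_t status;		/* valid only when !is_notification */
		const uint8_t *payload;
		size_t payload_len;
	};

	static int raw_msg_decode(const uint8_t *buf, size_t len, bool is_notification,
				  struct raw_msg_view *out)
	{
		size_t off = sizeof(uint32_t);

		if (len < off)
			return -EINVAL;
		memcpy(&out->hdr, buf, sizeof(out->hdr));	/* LE host assumed */

		if (!is_notification) {
			if (len < 2 * sizeof(uint32_t))
				return -EINVAL;
			memcpy(&out->status, buf + off, sizeof(out->status));
			off += sizeof(uint32_t);
		}

		out->payload = buf + off;
		out->payload_len = len - off;
		return 0;
	}
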
+
+/**
+ * scmi_raw_message_report - Helper to report back valid responses/notifications
+ * to raw message requests.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @xfer: The xfer containing the message to be reported
+ * @idx: The index of the queue.
+ * @chan_id: The channel ID to use.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the regular RX
+ * path to save and enqueue the response/notification payload carried by this
+ * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
+ *
+ * This way the caller can free the related xfer immediately afterwards and the
+ * user can read back the raw message payload at its own pace (if ever) without
+ * holding an xfer for too long.
+ */
+void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id)
+{
+ int ret;
+ unsigned long flags;
+ struct scmi_raw_buffer *rb;
+ struct device *dev;
+ struct scmi_raw_queue *q;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
+ return;
+
+ dev = raw->handle->dev;
+ q = scmi_raw_queue_select(raw, idx,
+ SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+ if (!q) {
+ dev_warn(dev,
+ "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+ idx, chan_id);
+ return;
+ }
+
+ /*
+ * Grab the msg_q_lock upfront to avoid a possible race between
+ * realizing the free list was empty and effectively picking the next
+ * buffer to use from the oldest one enqueued and still unread on this
+ * msg_q.
+ *
+ * Note that these locks are taken together nowhere else, so there is no
+ * risk of deadlock due to lock inversion.
+ */
+ spin_lock_irqsave(&q->msg_q_lock, flags);
+ rb = scmi_raw_buffer_get(q);
+ if (!rb) {
+ /*
+ * Immediate and delayed replies to previously injected Raw
+ * commands MUST be read back from userspace to free the buffers:
+ * if this is not happening, something is seriously broken and
+ * must be fixed at the application level, so complain loudly.
+ */
+ if (idx == SCMI_RAW_REPLY_QUEUE) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ dev_warn(dev,
+ "RAW[%d] - Buffers exhausted. Dropping report.\n",
+ idx);
+ return;
+ }
+
+ /*
+ * Notifications and errors queues are instead handled in a
+ * circular manner: unread old buffers are just overwritten by
+ * newer ones.
+ *
+ * The main reason for this is that notifications originated
+ * by Raw requests cannot be distinguished from normal ones, so
+ * the Raw buffer queues risk being flooded and depleted by
+ * notifications if they are mistakenly left enabled or when
+ * running in coexistence mode.
+ */
+ rb = scmi_raw_buffer_dequeue_unlocked(q);
+ if (WARN_ON(!rb)) {
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+ return;
+ }
+
+ /* Reset to full buffer length */
+ rb->msg.len = rb->max_len;
+
+ dev_warn_once(dev,
+ "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
+ idx);
+ }
+ spin_unlock_irqrestore(&q->msg_q_lock, flags);
+
+ ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
+ if (ret) {
+ dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
+ scmi_raw_buffer_put(q, rb);
+ return;
+ }
+
+ scmi_raw_buffer_enqueue(q, rb);
+}
+
+static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
+ struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, u32 msg_hdr)
+{
+ /* Unpack received HDR as it is */
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
+
+ memset(xfer->rx.buf, 0x00, xfer->rx.len);
+
+ raw->desc->ops->fetch_response(cinfo, xfer);
+}
+
+/**
+ * scmi_raw_error_report - Helper to report back timed-out or generally
+ * unexpected replies.
+ *
+ * @r: An opaque reference to the raw instance configuration
+ * @cinfo: A reference to the channel to use to retrieve the broken xfer
+ * @msg_hdr: The SCMI message header of the message to fetch and report
+ * @priv: Any private data related to the xfer.
+ *
+ * If Raw mode is enabled, this is called from the SCMI core on the RX path in
+ * case of errors to save and enqueue the bad message payload carried by the
+ * message that has just been received.
+ *
+ * Note that we have to manually fetch any available payload into a temporary
+ * xfer to be able to save and enqueue the message, since the regular RX error
+ * path that called this will not have fetched the message payload, having
+ * classified it as an error.
+ */
+void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv)
+{
+ struct scmi_xfer xfer;
+ struct scmi_raw_mode_info *raw = r;
+
+ if (!raw)
+ return;
+
+ xfer.rx.len = raw->desc->max_msg_size;
+ xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
+ if (!xfer.rx.buf) {
+ dev_info(raw->handle->dev,
+ "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
+ msg_hdr);
+ return;
+ }
+
+ /* Any transport-provided priv must be passed back down to transport */
+ if (priv)
+ /* Ensure priv is visible */
+ smp_store_mb(xfer.priv, priv);
+
+ scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
+ scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
+
+ kfree(xfer.rx.buf);
+}
diff --git a/drivers/firmware/arm_scmi/raw_mode.h b/drivers/firmware/arm_scmi/raw_mode.h
new file mode 100644
index 0000000000..8af756a83f
--- /dev/null
+++ b/drivers/firmware/arm_scmi/raw_mode.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * Raw mode support header.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_RAW_MODE_H
+#define _SCMI_RAW_MODE_H
+
+#include "common.h"
+
+enum {
+ SCMI_RAW_REPLY_QUEUE,
+ SCMI_RAW_NOTIF_QUEUE,
+ SCMI_RAW_ERRS_QUEUE,
+ SCMI_RAW_MAX_QUEUE
+};
+
+void *scmi_raw_mode_init(const struct scmi_handle *handle,
+ struct dentry *top_dentry, int instance_id,
+ u8 *channels, int num_chans,
+ const struct scmi_desc *desc, int tx_max_msg);
+void scmi_raw_mode_cleanup(void *raw);
+
+void scmi_raw_message_report(void *raw, struct scmi_xfer *xfer,
+ unsigned int idx, unsigned int chan_id);
+void scmi_raw_error_report(void *raw, struct scmi_chan_info *cinfo,
+ u32 msg_hdr, void *priv);
+
+#endif /* _SCMI_RAW_MODE_H */
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 0000000000..e9afa8cab7
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019-2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+enum scmi_reset_protocol_cmd {
+ RESET_DOMAIN_ATTRIBUTES = 0x3,
+ RESET = 0x4,
+ RESET_NOTIFY = 0x5,
+ RESET_DOMAIN_NAME_GET = 0x6,
+};
+
+#define NUM_RESET_DOMAIN_MASK 0xffff
+#define RESET_NOTIFY_ENABLE BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
+ __le32 latency;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+ __le32 domain_id;
+ __le32 flags;
+#define AUTONOMOUS_RESET BIT(0)
+#define EXPLICIT_RESET_ASSERT BIT(1)
+#define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+#define ARCH_COLD_RESET 0
+};
+
+struct scmi_msg_reset_notify {
+ __le32 id;
+ __le32 event_control;
+#define RESET_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_reset_issued_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 reset_state;
+};
+
+struct reset_dom_info {
+ bool async_reset;
+ bool reset_notify;
+ u32 latency_us;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+ u32 version;
+ int num_domains;
+ struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_reset_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ u32 attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(attr), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ attr = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 domain, struct reset_dom_info *dom_info,
+ u32 version)
+{
+ int ret;
+ u32 attributes;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_reset_domain_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
+ sizeof(domain), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ attributes = le32_to_cpu(attr->attributes);
+
+ dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+ dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ dom_info->latency_us = le32_to_cpu(attr->latency);
+ if (dom_info->latency_us == U32_MAX)
+ dom_info->latency_us = 0;
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
+ dom_info->name, SCMI_MAX_STR_SIZE);
+
+ return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
+ return pi->num_domains;
+}
+
+static const char *
+scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 flags, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+ struct reset_dom_info *rdom;
+
+ if (domain >= pi->num_domains)
+ return -EINVAL;
+
+ rdom = pi->dom_info + domain;
+ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
+ if (ret)
+ return ret;
+
+ dom = t->tx.buf;
+ dom->domain_id = cpu_to_le32(domain);
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+ if (flags & ASYNCHRONOUS_RESET)
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ else
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph,
+ u32 domain)
+{
+ return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain)
+{
+ return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET);
+}
+
+static const struct scmi_reset_proto_ops reset_proto_ops = {
+ .num_domains_get = scmi_reset_num_domains_get,
+ .name_get = scmi_reset_name_get,
+ .latency_get = scmi_reset_latency_get,
+ .reset = scmi_reset_domain_reset,
+ .assert = scmi_reset_domain_assert,
+ .deassert = scmi_reset_domain_deassert,
+};
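
As a hedged consumer-side sketch (not part of this file): a driver bound to the Reset protocol would obtain these operations and a protocol handle through the SCMI handle, in the same way scmi_pm_domain.c below does for the Power protocol, and could then drive a domain roughly as follows. The example_cycle_domain() name is illustrative only.

	/* Illustrative only: ops/ph are assumed to come from
	 * handle->devm_protocol_get(sdev, SCMI_PROTOCOL_RESET, &ph).
	 */
	static int example_cycle_domain(const struct scmi_reset_proto_ops *ops,
					const struct scmi_protocol_handle *ph,
					u32 domain)
	{
		int ret, num;

		num = ops->num_domains_get(ph);
		if (num < 0)
			return num;
		if (domain >= (u32)num)
			return -EINVAL;

		/* Hold the domain in reset, then release it... */
		ret = ops->assert(ph, domain);
		if (!ret)
			ret = ops->deassert(ph, domain);
		if (ret)
			return ret;

		/* ...or request one full autonomous reset cycle. */
		return ops->reset(ph, domain);
	}
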
+
+static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_notify *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, RESET_NOTIFY, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(domain_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_reset_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_reset_notify(ph, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *
+scmi_reset_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_reset_issued_notify_payld *p = payld;
+ struct scmi_reset_issued_report *r = report;
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->reset_state = le32_to_cpu(p->reset_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static int scmi_reset_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_reset_info *pinfo = ph->get_priv(ph);
+
+ if (!pinfo)
+ return -EINVAL;
+
+ return pinfo->num_domains;
+}
+
+static const struct scmi_event reset_events[] = {
+ {
+ .id = SCMI_EVENT_RESET_ISSUED,
+ .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
+ .max_report_sz = sizeof(struct scmi_reset_issued_report),
+ },
+};
+
+static const struct scmi_event_ops reset_event_ops = {
+ .get_num_sources = scmi_reset_get_num_sources,
+ .set_notify_enabled = scmi_reset_set_notify_enabled,
+ .fill_custom_report = scmi_reset_fill_custom_report,
+};
+
+static const struct scmi_protocol_events reset_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &reset_event_ops,
+ .evts = reset_events,
+ .num_events = ARRAY_SIZE(reset_events),
+};
+
+static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct scmi_reset_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Reset Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_reset_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_reset_domain_attributes_get(ph, domain, dom, version);
+ }
+
+ pinfo->version = version;
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_reset = {
+ .id = SCMI_PROTOCOL_RESET,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_reset_protocol_init,
+ .ops = &reset_proto_ops,
+ .events = &reset_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset)
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
new file mode 100644
index 0000000000..0e05a79de8
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic power domain support.
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/scmi_protocol.h>
+
+static const struct scmi_power_proto_ops *power_ops;
+
+struct scmi_pm_domain {
+ struct generic_pm_domain genpd;
+ const struct scmi_protocol_handle *ph;
+ const char *name;
+ u32 domain;
+};
+
+#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
+
+static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+ int ret;
+ u32 state, ret_state;
+ struct scmi_pm_domain *pd = to_scmi_pd(domain);
+
+ if (power_on)
+ state = SCMI_POWER_STATE_GENERIC_ON;
+ else
+ state = SCMI_POWER_STATE_GENERIC_OFF;
+
+ ret = power_ops->state_set(pd->ph, pd->domain, state);
+ if (!ret)
+ ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
+ if (!ret && state != ret_state)
+ return -EIO;
+
+ return ret;
+}
+
+static int scmi_pd_power_on(struct generic_pm_domain *domain)
+{
+ return scmi_pd_power(domain, true);
+}
+
+static int scmi_pd_power_off(struct generic_pm_domain *domain)
+{
+ return scmi_pd_power(domain, false);
+}
+
+static int scmi_pm_domain_probe(struct scmi_device *sdev)
+{
+ int num_domains, i;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scmi_pm_domain *scmi_pd;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct generic_pm_domain **domains;
+ const struct scmi_handle *handle = sdev->handle;
+ struct scmi_protocol_handle *ph;
+
+ if (!handle)
+ return -ENODEV;
+
+ power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
+ if (IS_ERR(power_ops))
+ return PTR_ERR(power_ops);
+
+ num_domains = power_ops->num_domains_get(ph);
+ if (num_domains < 0) {
+ dev_err(dev, "number of domains not found\n");
+ return num_domains;
+ }
+
+ scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
+ if (!scmi_pd)
+ return -ENOMEM;
+
+ scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
+ if (!scmi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scmi_pd++) {
+ u32 state;
+
+ if (power_ops->state_get(ph, i, &state)) {
+ dev_warn(dev, "failed to get state for domain %d\n", i);
+ continue;
+ }
+
+ scmi_pd->domain = i;
+ scmi_pd->ph = ph;
+ scmi_pd->name = power_ops->name_get(ph, i);
+ scmi_pd->genpd.name = scmi_pd->name;
+ scmi_pd->genpd.power_off = scmi_pd_power_off;
+ scmi_pd->genpd.power_on = scmi_pd_power_on;
+
+ pm_genpd_init(&scmi_pd->genpd, NULL,
+ state == SCMI_POWER_STATE_GENERIC_OFF);
+
+ domains[i] = &scmi_pd->genpd;
+ }
+
+ scmi_pd_data->domains = domains;
+ scmi_pd_data->num_domains = num_domains;
+
+ dev_set_drvdata(dev, scmi_pd_data);
+
+ return of_genpd_add_provider_onecell(np, scmi_pd_data);
+}
+
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_POWER, "genpd" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_power_domain_driver = {
+ .name = "scmi-power-domain",
+ .probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
new file mode 100644
index 0000000000..6eb7d2a4b6
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic SystemPower Control driver.
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+/*
+ * In order to handle platform originated SCMI SystemPower requests (like
+ * shutdowns or cold/warm resets) we register an SCMI Notification notifier
+ * block to react when such SCMI SystemPower events are emitted by platform.
+ *
+ * Once such a notification is received we act accordingly to perform the
+ * required system transition depending on the kind of request.
+ *
+ * Graceful requests are routed to userspace through the same API methods
+ * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
+ * events.
+ *
+ * Direct forceful requests are not supported since they are not meant to be
+ * sent by the SCMI platform to an OSPM like Linux.
+ *
+ * Additionally, graceful request notifications can carry an optional timeout
+ * field stating the maximum amount of time allowed by the platform for
+ * completion, after which they are converted to forceful ones. The assumption
+ * here is that even graceful requests can be upper-bounded by a maximum final
+ * timeout strictly enforced by the platform itself, which can ultimately cut
+ * the power off at will at any time. To avoid such an extreme scenario, we
+ * track the progress of graceful requests by means of a reboot notifier and
+ * convert timed-out graceful requests to forceful ones, so that at least we
+ * try to perform a clean sync and shutdown/restart before the power is cut.
+ *
+ * Given the peculiar nature of the SCMI SystemPower protocol, which is in
+ * charge of triggering system-wide shutdown/reboot events, there should be
+ * only one SCMI platform actively emitting SystemPower events.
+ * For this reason the SCMI core enforces the creation of one single unique
+ * device associated with the SCMI SystemPower protocol; no matter how many
+ * SCMI platforms are defined on the system, only one can be designated to
+ * support SystemPower. As a consequence this driver will never be probed
+ * more than once.
+ *
+ * For similar reasons, as soon as the first valid SystemPower notification is
+ * received by this driver and the shutdown/reboot is started, any further
+ * notification possibly emitted by the platform will be ignored.
+ */
+
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifndef MODULE
+#include <linux/fs.h>
+#endif
+
+enum scmi_syspower_state {
+ SCMI_SYSPOWER_IDLE,
+ SCMI_SYSPOWER_IN_PROGRESS,
+ SCMI_SYSPOWER_REBOOTING
+};
+
+/**
+ * struct scmi_syspower_conf - Common configuration
+ *
+ * @dev: A reference device
+ * @state: Current SystemPower state
+ * @state_mtx: @state related mutex
+ * @required_transition: The requested transition as described in the received
+ * SCMI SystemPower notification
+ * @userspace_nb: The notifier_block registered against the SCMI SystemPower
+ * notification to start the needed userspace interactions.
+ * @reboot_nb: A notifier_block optionally used to track reboot progress
+ * @forceful_work: A worker used to trigger a forceful transition once a
+ * graceful one has timed out.
+ */
+struct scmi_syspower_conf {
+ struct device *dev;
+ enum scmi_syspower_state state;
+ /* Protect access to state */
+ struct mutex state_mtx;
+ enum scmi_system_events required_transition;
+
+ struct notifier_block userspace_nb;
+ struct notifier_block reboot_nb;
+
+ struct delayed_work forceful_work;
+};
+
+#define userspace_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, userspace_nb)
+
+#define reboot_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, reboot_nb)
+
+#define dwork_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, forceful_work)
+
+/**
+ * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
+ * system transition
+ * @nb: Reference to the related notifier block
+ * @reason: The reason for the ongoing reboot
+ * @__unused: The cmd being executed on a restart request (unused)
+ *
+ * When an ongoing system transition compatible with the one requested by
+ * SCMI is detected, cancel the delayed work.
+ *
+ * Return: NOTIFY_OK in any case
+ */
+static int scmi_reboot_notifier(struct notifier_block *nb,
+ unsigned long reason, void *__unused)
+{
+ struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
+
+ mutex_lock(&sc->state_mtx);
+ switch (reason) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ case SYS_RESTART:
+ if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
+ sc->required_transition == SCMI_SYSTEM_WARMRESET)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ default:
+ break;
+ }
+
+ if (sc->state == SCMI_SYSPOWER_REBOOTING) {
+ dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
+ cancel_delayed_work_sync(&sc->forceful_work);
+ }
+ mutex_unlock(&sc->state_mtx);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * scmi_request_forceful_transition - Request forceful SystemPower transition
+ * @sc: A reference to the configuration data
+ *
+ * Initiates the required SystemPower transition without involving userspace:
+ * just trigger the action at the kernel level after issuing an emergency
+ * sync (if possible at all).
+ */
+static inline void
+scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
+{
+ dev_dbg(sc->dev, "Serving forceful request:%d\n",
+ sc->required_transition);
+
+#ifndef MODULE
+ emergency_sync();
+#endif
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ kernel_power_off();
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ kernel_restart(NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void scmi_forceful_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc;
+ struct delayed_work *dwork;
+
+ if (system_state > SYSTEM_RUNNING)
+ return;
+
+ dwork = to_delayed_work(work);
+ sc = dwork_to_sconf(dwork);
+
+ dev_dbg(sc->dev, "Graceful request timed out...forcing !\n");
+ mutex_lock(&sc->state_mtx);
+ /* avoid deadlock by unregistering reboot notifier first */
+ unregister_reboot_notifier(&sc->reboot_nb);
+ if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
+ scmi_request_forceful_transition(sc);
+ mutex_unlock(&sc->state_mtx);
+}
+
+/**
+ * scmi_request_graceful_transition - Request graceful SystemPower transition
+ * @sc: A reference to the configuration data
+ * @timeout_ms: The desired timeout to wait for the shutdown to complete before
+ * the system is forcibly shut down.
+ *
+ * Initiates the required SystemPower transition, requesting userspace
+ * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
+ * processing.
+ *
+ * It also registers a reboot notifier and schedules a delayed work item in
+ * order to detect whether userspace actions are taking too long and, in that
+ * case, to trigger a forceful transition.
+ */
+static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
+ unsigned int timeout_ms)
+{
+ unsigned int adj_timeout_ms = 0;
+
+ if (timeout_ms) {
+ int ret;
+
+ sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
+ ret = register_reboot_notifier(&sc->reboot_nb);
+ if (!ret) {
+ /* Wait only up to 75% of the advertised timeout */
+ adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
+ INIT_DELAYED_WORK(&sc->forceful_work,
+ scmi_forceful_work_func);
+ schedule_delayed_work(&sc->forceful_work,
+ msecs_to_jiffies(adj_timeout_ms));
+ } else {
+ /* Carry on best effort even without a reboot notifier */
+ dev_warn(sc->dev,
+ "Cannot register reboot notifier !\n");
+ }
+ }
+
+ dev_dbg(sc->dev,
+ "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
+ sc->required_transition, timeout_ms, adj_timeout_ms);
+
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ /*
+ * When triggered early at boot time the 'orderly' call will
+ * partially fail due to the lack of userspace itself, but
+ * the force=true argument will anyway start a successful
+ * forced shutdown.
+ */
+ orderly_poweroff(true);
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ orderly_reboot();
+ break;
+ default:
+ break;
+ }
+}
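
As a worked example of the timeout handling above: with a platform-advertised timeout of 30000 ms, mult_frac(30000, 3, 4) schedules the forceful fallback after 22500 ms, leaving the remaining quarter of the window for the forced shutdown/reboot itself before the platform's own deadline expires.
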
+
+/**
+ * scmi_userspace_notifier - Notifier callback to act on SystemPower
+ * Notifications
+ * @nb: Reference to the related notifier block
+ * @event: The SystemPower notification event id
+ * @data: The SystemPower event report
+ *
+ * This callback is in charge of decoding the received SystemPower report
+ * and acting accordingly, triggering a graceful or forceful system transition.
+ *
+ * Note that once a valid SCMI SystemPower event starts being served, any
+ * other following SystemPower notification received from the same SCMI
+ * instance (handle) will be ignored.
+ *
+ * Return: NOTIFY_OK once a valid SystemPower event has been successfully
+ * processed.
+ */
+static int scmi_userspace_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct scmi_system_power_state_notifier_report *er = data;
+ struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
+
+ if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
+ er->system_state);
+ return NOTIFY_DONE;
+ }
+
+ if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
+ dev_err(sc->dev, "Ignoring forceful notification.\n");
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * Bail out if the system is already shutting down or if an SCMI SystemPower
+ * request is already being served.
+ */
+ if (system_state > SYSTEM_RUNNING)
+ return NOTIFY_DONE;
+ mutex_lock(&sc->state_mtx);
+ if (sc->state != SCMI_SYSPOWER_IDLE) {
+ dev_dbg(sc->dev,
+ "Transition already in progress...ignore.\n");
+ mutex_unlock(&sc->state_mtx);
+ return NOTIFY_DONE;
+ }
+ sc->state = SCMI_SYSPOWER_IN_PROGRESS;
+ mutex_unlock(&sc->state_mtx);
+
+ sc->required_transition = er->system_state;
+
+ /* Leave a trace in the logs of who triggered the shutdown/reboot. */
+ dev_info(sc->dev, "Serving shutdown/reboot request: %d\n",
+ sc->required_transition);
+
+ scmi_request_graceful_transition(sc, er->timeout);
+
+ return NOTIFY_OK;
+}
+
+static int scmi_syspower_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct scmi_syspower_conf *sc;
+ struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
+ if (ret)
+ return ret;
+
+ sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ mutex_init(&sc->state_mtx);
+ sc->required_transition = SCMI_SYSTEM_MAX;
+ sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
+ sc->dev = &sdev->dev;
+
+ return handle->notify_ops->devm_event_notifier_register(sdev,
+ SCMI_PROTOCOL_SYSTEM,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ NULL, &sc->userspace_nb);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_SYSTEM, "syspower" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_system_power_driver = {
+ .name = "scmi-system-power",
+ .probe = scmi_syspower_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_system_power_driver);
+
+MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
new file mode 100644
index 0000000000..0b5853fa9d
--- /dev/null
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Sensor Protocol
+ *
+ * Copyright (C) 2018-2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+#define SCMI_MAX_NUM_SENSOR_AXIS 63
+#define SCMIv2_SENSOR_PROTOCOL 0x10000
+
+enum scmi_sensor_protocol_cmd {
+ SENSOR_DESCRIPTION_GET = 0x3,
+ SENSOR_TRIP_POINT_NOTIFY = 0x4,
+ SENSOR_TRIP_POINT_CONFIG = 0x5,
+ SENSOR_READING_GET = 0x6,
+ SENSOR_AXIS_DESCRIPTION_GET = 0x7,
+ SENSOR_LIST_UPDATE_INTERVALS = 0x8,
+ SENSOR_CONFIG_GET = 0x9,
+ SENSOR_CONFIG_SET = 0xA,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
+ SENSOR_NAME_GET = 0xC,
+ SENSOR_AXIS_NAME_GET = 0xD,
+};
+
+struct scmi_msg_resp_sensor_attributes {
+ __le16 num_sensors;
+ u8 max_requests;
+ u8 reserved;
+ __le32 reg_addr_low;
+ __le32 reg_addr_high;
+ __le32 reg_size;
+};
+
+/* v3 attributes_low macros */
+#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x))
+#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x))
+#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x))
+#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x))
+
+/* v2 attributes_high macros */
+#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x))
+#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x))
+
+/* v3 attributes_high macros */
+#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x))
+#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x))
+
+/* v3 resolution macros */
+#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x))
+#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x))
+
+struct scmi_msg_resp_attrs {
+ __le32 min_range_low;
+ __le32 min_range_high;
+ __le32 max_range_low;
+ __le32 max_range_high;
+};
+
+struct scmi_msg_sensor_description {
+ __le32 desc_index;
+};
+
+struct scmi_msg_resp_sensor_description {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct scmi_sensor_descriptor {
+ __le32 id;
+ __le32 attributes_low;
+/* Common attributes_low macros */
+#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
+#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x))
+#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
+ __le32 attributes_high;
+/* Common attributes_high macros */
+#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x))
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
+#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ /* only for version > 2.0 */
+ __le32 power;
+ __le32 resolution;
+ struct scmi_msg_resp_attrs scalar_attrs;
+ } desc[];
+};
+
+/* Base scmi_sensor_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28
+
+/* Sign extend to a full s32 */
+#define S32_EXT(v) \
+ ({ \
+ int __v = (v); \
+ \
+ if (__v & SENSOR_SCALE_SIGN) \
+ __v |= SENSOR_SCALE_EXTEND; \
+ __v; \
+ })
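
Worked example of the sign extension above: a raw 5-bit scale field of 0x05 passes through S32_EXT() unchanged as 5, while 0x1B has SENSOR_SCALE_SIGN (BIT(4)) set, so SENSOR_SCALE_EXTEND (GENMASK(31, 5)) is ORed in and the macro yields 0xFFFFFFFB, i.e. -5 once interpreted as a signed 32-bit value.
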
+
+struct scmi_msg_sensor_axis_description_get {
+ __le32 id;
+ __le32 axis_desc_index;
+};
+
+struct scmi_msg_resp_sensor_axis_description {
+ __le32 num_axis_flags;
+#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x))
+#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x))
+ struct scmi_axis_descriptor {
+ __le32 id;
+ __le32 attributes_low;
+#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x))
+ __le32 attributes_high;
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 resolution;
+ struct scmi_msg_resp_attrs attrs;
+ } desc[];
+};
+
+struct scmi_msg_resp_sensor_axis_names_description {
+ __le32 num_axis_flags;
+ struct scmi_sensor_axis_name_descriptor {
+ __le32 axis_id;
+ u8 name[SCMI_MAX_STR_SIZE];
+ } desc[];
+};
+
+/* Base scmi_axis_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
+
+struct scmi_msg_sensor_list_update_intervals {
+ __le32 id;
+ __le32 index;
+};
+
+struct scmi_msg_resp_sensor_list_update_intervals {
+ __le32 num_intervals_flags;
+#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x))
+#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x))
+#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x))
+ __le32 intervals[];
+};
+
+struct scmi_msg_sensor_request_notify {
+ __le32 id;
+ __le32 event_control;
+#define SENSOR_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_msg_set_sensor_trip_point {
+ __le32 id;
+ __le32 event_control;
+#define SENSOR_TP_EVENT_MASK (0x3)
+#define SENSOR_TP_DISABLED 0x0
+#define SENSOR_TP_POSITIVE 0x1
+#define SENSOR_TP_NEGATIVE 0x2
+#define SENSOR_TP_BOTH 0x3
+#define SENSOR_TP_ID(x) (((x) & 0xff) << 4)
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_msg_sensor_config_set {
+ __le32 id;
+ __le32 sensor_config;
+};
+
+struct scmi_msg_sensor_reading_get {
+ __le32 id;
+ __le32 flags;
+#define SENSOR_READ_ASYNC BIT(0)
+};
+
+struct scmi_resp_sensor_reading_complete {
+ __le32 id;
+ __le32 readings_low;
+ __le32 readings_high;
+};
+
+struct scmi_sensor_reading_resp {
+ __le32 sensor_value_low;
+ __le32 sensor_value_high;
+ __le32 timestamp_low;
+ __le32 timestamp_high;
+};
+
+struct scmi_resp_sensor_reading_complete_v3 {
+ __le32 id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
+struct scmi_sensor_trip_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ __le32 trip_point_desc;
+};
+
+struct scmi_sensor_update_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
+struct sensors_info {
+ u32 version;
+ int num_sensors;
+ int max_requests;
+ u64 reg_addr;
+ u32 reg_size;
+ struct scmi_sensor_info *sensors;
+};
+
+static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
+ struct sensors_info *si)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_sensor_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ si->num_sensors = le16_to_cpu(attr->num_sensors);
+ si->max_requests = attr->max_requests;
+ si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
+ (u64)le32_to_cpu(attr->reg_addr_high) << 32;
+ si->reg_size = le32_to_cpu(attr->reg_size);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
+ const struct scmi_msg_resp_attrs *in)
+{
+ out->min_range = get_unaligned_le64((void *)&in->min_range_low);
+ out->max_range = get_unaligned_le64((void *)&in->max_range_low);
+}
+
+struct scmi_sens_ipriv {
+ void *priv;
+ struct device *dev;
+};
+
+static void iter_intervals_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *p)
+{
+ struct scmi_msg_sensor_list_update_intervals *msg = message;
+ const struct scmi_sensor_info *s;
+
+ s = ((const struct scmi_sens_ipriv *)p)->priv;
+ /* Set the sensor ID and the number of intervals already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->index = cpu_to_le32(desc_index);
+}
+
+static int iter_intervals_update_state(struct scmi_iterator_state *st,
+ const void *response, void *p)
+{
+ u32 flags;
+ struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
+ struct device *dev = ((struct scmi_sens_ipriv *)p)->dev;
+ const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+
+ flags = le32_to_cpu(r->num_intervals_flags);
+ st->num_returned = NUM_INTERVALS_RETURNED(flags);
+ st->num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+ /*
+ * The maximum number of intervals is not declared anywhere beforehand,
+ * so assume it is returned + remaining on the first call.
+ */
+ if (!st->max_resources) {
+ s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+ s->intervals.count = st->num_returned + st->num_remaining;
+ /* segmented intervals are reported in one triplet */
+ if (s->intervals.segmented &&
+ (st->num_remaining || st->num_returned != 3)) {
+ dev_err(dev,
+ "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+ s->id, s->intervals.count);
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ return -EINVAL;
+ }
+ /* Direct allocation when exceeding pre-allocated */
+ if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+ s->intervals.desc =
+ devm_kcalloc(dev,
+ s->intervals.count,
+ sizeof(*s->intervals.desc),
+ GFP_KERNEL);
+ if (!s->intervals.desc) {
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ return -ENOMEM;
+ }
+ }
+
+ st->max_resources = s->intervals.count;
+ }
+
+ return 0;
+}
+
+static int
+iter_intervals_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *p)
+{
+ const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+ struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
+
+ s->intervals.desc[st->desc_index + st->loop_idx] =
+ le32_to_cpu(r->intervals[st->loop_idx]);
+
+ return 0;
+}
+
+static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
+ struct scmi_sensor_info *s)
+{
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_intervals_prepare_message,
+ .update_state = iter_intervals_update_state,
+ .process_response = iter_intervals_process_response,
+ };
+ struct scmi_sens_ipriv upriv = {
+ .priv = s,
+ .dev = ph->dev,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
+ SENSOR_LIST_UPDATE_INTERVALS,
+ sizeof(struct scmi_msg_sensor_list_update_intervals),
+ &upriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+struct scmi_apriv {
+ bool any_axes_support_extended_names;
+ struct scmi_sensor_info *s;
+};
+
+static void iter_axes_desc_prepare_message(void *message,
+ const unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_sensor_axis_description_get *msg = message;
+ const struct scmi_apriv *apriv = priv;
+
+ /* Set the sensor ID and the number of axis descriptors already read */
+ msg->id = cpu_to_le32(apriv->s->id);
+ msg->axis_desc_index = cpu_to_le32(desc_index);
+}
+
+static int
+iter_axes_desc_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ const struct scmi_msg_resp_sensor_axis_description *r = response;
+
+ flags = le32_to_cpu(r->num_axis_flags);
+ st->num_returned = NUM_AXIS_RETURNED(flags);
+ st->num_remaining = NUM_AXIS_REMAINING(flags);
+ st->priv = (void *)&r->desc[0];
+
+ return 0;
+}
+
+static int
+iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ u32 attrh, attrl;
+ struct scmi_sensor_axis_info *a;
+ size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+ struct scmi_apriv *apriv = priv;
+ const struct scmi_axis_descriptor *adesc = st->priv;
+
+ attrl = le32_to_cpu(adesc->attributes_low);
+ if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl))
+ apriv->any_axes_support_extended_names = true;
+
+ a = &apriv->s->axis[st->desc_index + st->loop_idx];
+ a->id = le32_to_cpu(adesc->id);
+ a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(adesc->attributes_high);
+ a->scale = S32_EXT(SENSOR_SCALE(attrh));
+ a->type = SENSOR_TYPE(attrh);
+ strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ if (a->extended_attrs) {
+ unsigned int ares = le32_to_cpu(adesc->resolution);
+
+ a->resolution = SENSOR_RES(ares);
+ a->exponent = S32_EXT(SENSOR_RES_EXP(ares));
+ dsize += sizeof(adesc->resolution);
+
+ scmi_parse_range_attrs(&a->attrs, &adesc->attrs);
+ dsize += sizeof(adesc->attrs);
+ }
+ st->priv = ((u8 *)adesc + dsize);
+
+ return 0;
+}
+
+static int
+iter_axes_extended_name_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ const struct scmi_msg_resp_sensor_axis_names_description *r = response;
+
+ flags = le32_to_cpu(r->num_axis_flags);
+ st->num_returned = NUM_AXIS_RETURNED(flags);
+ st->num_remaining = NUM_AXIS_REMAINING(flags);
+ st->priv = (void *)&r->desc[0];
+
+ return 0;
+}
+
+static int
+iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st,
+ void *priv)
+{
+ struct scmi_sensor_axis_info *a;
+ const struct scmi_apriv *apriv = priv;
+ struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
+ u32 axis_id = le32_to_cpu(adesc->axis_id);
+
+ if (axis_id >= st->max_resources)
+ return -EPROTO;
+
+ /*
+ * Pick the corresponding descriptor based on the axis_id embedded
+ * in the reply since the list of axes supporting extended names
+ * can be a subset of all the axes.
+ */
+ a = &apriv->s->axis[axis_id];
+ strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+ st->priv = ++adesc;
+
+ return 0;
+}
+
+static int
+scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
+ struct scmi_sensor_info *s)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_axes_desc_prepare_message,
+ .update_state = iter_axes_extended_name_update_state,
+ .process_response = iter_axes_extended_name_process_response,
+ };
+ struct scmi_apriv apriv = {
+ .any_axes_support_extended_names = false,
+ .s = s,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+ SENSOR_AXIS_NAME_GET,
+ sizeof(struct scmi_msg_sensor_axis_description_get),
+ &apriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ /*
+ * Do not cause whole protocol initialization failure when failing to
+ * get extended names for axes.
+ */
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ dev_warn(ph->dev,
+ "Failed to get axes extended names for %s (ret:%d).\n",
+ s->name, ret);
+
+ return 0;
+}
+
+static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
+ struct scmi_sensor_info *s,
+ u32 version)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_axes_desc_prepare_message,
+ .update_state = iter_axes_desc_update_state,
+ .process_response = iter_axes_desc_process_response,
+ };
+ struct scmi_apriv apriv = {
+ .any_axes_support_extended_names = false,
+ .s = s,
+ };
+
+ s->axis = devm_kcalloc(ph->dev, s->num_axis,
+ sizeof(*s->axis), GFP_KERNEL);
+ if (!s->axis)
+ return -ENOMEM;
+
+ iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+ SENSOR_AXIS_DESCRIPTION_GET,
+ sizeof(struct scmi_msg_sensor_axis_description_get),
+ &apriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
+
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+ apriv.any_axes_support_extended_names)
+ ret = scmi_sensor_axis_extended_names_get(ph, s);
+
+ return ret;
+}
+
+static void iter_sens_descr_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_sensor_description *msg = message;
+
+ msg->desc_index = cpu_to_le32(desc_index);
+}
+
+static int iter_sens_descr_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_sensor_description *r = response;
+
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
+ st->priv = (void *)&r->desc[0];
+
+ return 0;
+}
+
+static int
+iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+
+{
+ int ret = 0;
+ u32 attrh, attrl;
+ size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
+ struct scmi_sensor_info *s;
+ struct sensors_info *si = priv;
+ const struct scmi_sensor_descriptor *sdesc = st->priv;
+
+ s = &si->sensors[st->desc_index + st->loop_idx];
+ s->id = le32_to_cpu(sdesc->id);
+
+ attrl = le32_to_cpu(sdesc->attributes_low);
+ /* common bitfields parsing */
+ s->async = SUPPORTS_ASYNC_READ(attrl);
+ s->num_trip_points = NUM_TRIP_POINTS(attrl);
+ /*
+ * Only SCMIv3.0-specific bitfields below.
+ * Such bitfields are assumed to be zeroed on firmware
+ * versions where they are not relevant (assuming the
+ * firmware is not buggy).
+ */
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+ if (s->timestamped)
+ s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+ s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(sdesc->attributes_high);
+ /* common bitfields parsing */
+ s->scale = S32_EXT(SENSOR_SCALE(attrh));
+ s->type = SENSOR_TYPE(attrh);
+ /* Use pre-allocated pool wherever possible */
+ s->intervals.desc = s->intervals.prealloc_pool;
+ if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+ s->intervals.segmented = false;
+ s->intervals.count = 1;
+ /*
+ * Convert SCMIv2.0 update interval format to
+ * SCMIv3.0 to be used as the common exposed
+ * descriptor, accessible via common macros.
+ */
+ s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) |
+ SENSOR_UPDATE_SCALE(attrh);
+ } else {
+ /*
+ * From SCMIv3.0 update intervals are retrieved
+ * via a dedicated (optional) command.
+ * Since the command is optional, on error carry
+ * on without any update interval.
+ */
+ if (scmi_sensor_update_intervals(ph, s))
+ dev_dbg(ph->dev,
+ "Update Intervals not available for sensor ID:%d\n",
+ s->id);
+ }
+ /*
+ * Only bitfields specific to versions newer than SCMIv2.0 below.
+ * Such bitfields are assumed to be zeroed on firmware versions
+ * where they are not relevant (assuming the firmware is not
+ * buggy).
+ */
+ s->num_axis = min_t(unsigned int,
+ SUPPORTS_AXIS(attrh) ?
+ SENSOR_AXIS_NUMBER(attrh) : 0,
+ SCMI_MAX_NUM_SENSOR_AXIS);
+ strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ /*
+ * If supported overwrite short name with the extended
+ * one; on error just carry on and use already provided
+ * short name.
+ */
+ if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 &&
+ SUPPORTS_EXTENDED_NAMES(attrl))
+ ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,
+ s->name, SCMI_MAX_STR_SIZE);
+
+ if (s->extended_scalar_attrs) {
+ s->sensor_power = le32_to_cpu(sdesc->power);
+ dsize += sizeof(sdesc->power);
+
+ /* Only for sensors reporting scalar values */
+ if (s->num_axis == 0) {
+ unsigned int sres = le32_to_cpu(sdesc->resolution);
+
+ s->resolution = SENSOR_RES(sres);
+ s->exponent = S32_EXT(SENSOR_RES_EXP(sres));
+ dsize += sizeof(sdesc->resolution);
+
+ scmi_parse_range_attrs(&s->scalar_attrs,
+ &sdesc->scalar_attrs);
+ dsize += sizeof(sdesc->scalar_attrs);
+ }
+ }
+
+ if (s->num_axis > 0)
+ ret = scmi_sensor_axis_description(ph, s, si->version);
+
+ st->priv = ((u8 *)sdesc + dsize);
+
+ return ret;
+}
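
The v2-to-v3 interval conversion in the function above packs the 5-bit update scale into bits [4:0] and the update base into the bits above, matching the layout the SENSOR_UPDATE_BASE/SCALE macros in this file extract. A small self-contained sketch of that packing (the scmi_intvl_* helper names are illustrative only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror the (base << 5) | scale shape used in the code above */
	static uint32_t scmi_intvl_pack(uint32_t base, uint32_t scale)
	{
		return (base << 5) | (scale & 0x1f);
	}

	static void scmi_intvl_unpack(uint32_t v, uint32_t *base, uint32_t *scale)
	{
		*scale = v & 0x1f;
		*base = v >> 5;
	}

	int main(void)
	{
		uint32_t base, scale, v = scmi_intvl_pack(10, 3);

		scmi_intvl_unpack(v, &base, &scale);
		printf("packed=0x%x base=%u scale=%u\n", v, base, scale);
		return 0;
	}
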
+
+static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
+ struct sensors_info *si)
+{
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_sens_descr_prepare_message,
+ .update_state = iter_sens_descr_update_state,
+ .process_response = iter_sens_descr_process_response,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,
+ SENSOR_DESCRIPTION_GET,
+ sizeof(__le32), si);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return ph->hops->iter_response_run(iter);
+}
+
+static inline int
+scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
+ u8 message_id, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? SENSOR_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_request_notify *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(sensor_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_sensor_trip_point_notify(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(ph, sensor_id,
+ SENSOR_TRIP_POINT_NOTIFY,
+ enable);
+}
+
+static int
+scmi_sensor_continuous_update_notify(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(ph, sensor_id,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ enable);
+}
+
+static int
+scmi_sensor_trip_point_config(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 trip_id, u64 trip_value)
+{
+ int ret;
+ u32 evt_cntl = SENSOR_TP_BOTH;
+ struct scmi_xfer *t;
+ struct scmi_msg_set_sensor_trip_point *trip;
+
+ ret = ph->xops->xfer_get_init(ph, SENSOR_TRIP_POINT_CONFIG,
+ sizeof(*trip), 0, &t);
+ if (ret)
+ return ret;
+
+ trip = t->tx.buf;
+ trip->id = cpu_to_le32(sensor_id);
+ trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
+ trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
+ trip->value_high = cpu_to_le32(trip_value >> 32);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 *sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
+ sizeof(__le32), sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(sensor_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ *sensor_config = get_unaligned_le32(t->rx.buf);
+ s->sensor_config = *sensor_config;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_config_set *msg;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
+ sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->id = cpu_to_le32(sensor_id);
+ msg->sensor_config = cpu_to_le32(sensor_config);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ s->sensor_config = sensor_config;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+/**
+ * scmi_sensor_reading_get - Read scalar sensor value
+ * @ph: Protocol handle
+ * @sensor_id: Sensor ID
+ * @value: The 64bit value sensor reading
+ *
+ * This function returns a single 64 bit reading value representing the sensor
+ * value; if the platform SCMI Protocol implementation and the sensor support
+ * multiple axis and timestamped-reads, this just returns the first axis while
+ * dropping the timestamp value.
+ * Use instead the @scmi_sensor_reading_get_timestamped to retrieve the array of
+ * timestamped multi-axis values.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+ s = si->sensors + sensor_id;
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_resp_sensor_reading_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == sensor_id)
+ *value =
+ get_unaligned_le64(&resp->readings_low);
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static inline void
+scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
+ const struct scmi_sensor_reading_resp *in)
+{
+ out->value = get_unaligned_le64((void *)&in->sensor_value_low);
+ out->timestamp = get_unaligned_le64((void *)&in->timestamp_low);
+}
+
+/**
+ * scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
+ * @ph: Protocol handle
+ * @sensor_id: Sensor ID
+ * @count: The length of the provided @readings array
+ * @readings: An array of elements each representing a timestamped per-axis
+ * reading of type @struct scmi_sensor_reading.
+ * Returned readings are ordered as the @axis descriptors array
+ * included in @struct scmi_sensor_info and the max number of
+ * returned elements is min(@count, @num_axis); ideally the provided
+ * array should be of length @count equal to @num_axis.
+ *
+ * Return: 0 on Success
+ */
+static int
+scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ s = si->sensors + sensor_id;
+ if (!count || !readings ||
+ (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
+ sizeof(*sensor), 0, &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ int i;
+ struct scmi_resp_sensor_reading_complete_v3 *resp;
+
+ resp = t->rx.buf;
+ /* Retrieve only the requested number of axes anyway */
+ if (le32_to_cpu(resp->id) == sensor_id)
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp->readings[i]);
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ int i;
+ struct scmi_sensor_reading_resp *resp_readings;
+
+ resp_readings = t->rx.buf;
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp_readings[i]);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static const struct scmi_sensor_info *
+scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
+{
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return NULL;
+
+ return si->sensors + sensor_id;
+}
+
+static int scmi_sensor_count_get(const struct scmi_protocol_handle *ph)
+{
+ struct sensors_info *si = ph->get_priv(ph);
+
+ return si->num_sensors;
+}
+
+static const struct scmi_sensor_proto_ops sensor_proto_ops = {
+ .count_get = scmi_sensor_count_get,
+ .info_get = scmi_sensor_info_get,
+ .trip_point_config = scmi_sensor_trip_point_config,
+ .reading_get = scmi_sensor_reading_get,
+ .reading_get_timestamped = scmi_sensor_reading_get_timestamped,
+ .config_get = scmi_sensor_config_get,
+ .config_set = scmi_sensor_config_set,
+};
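+
+/*
+ * Illustrative usage sketch (not part of this driver): an SCMI user driver,
+ * e.g. a hwmon consumer, would typically obtain these ops through the SCMI
+ * core and call them roughly as follows (variable names are hypothetical,
+ * error handling omitted):
+ *
+ *   const struct scmi_sensor_proto_ops *sensor_ops;
+ *   struct scmi_protocol_handle *ph;
+ *   const struct scmi_sensor_info *s;
+ *   u64 value;
+ *
+ *   sensor_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_SENSOR, &ph);
+ *   s = sensor_ops->info_get(ph, sensor_id);
+ *   if (s && !sensor_ops->reading_get(ph, sensor_id, &value))
+ *           dev_info(dev, "sensor %s: %llu\n", s->name, value);
+ */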
+
+static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ ret = scmi_sensor_trip_point_notify(ph, src_id, enable);
+ break;
+ case SCMI_EVENT_SENSOR_UPDATE:
+ ret = scmi_sensor_continuous_update_notify(ph, src_id, enable);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *
+scmi_sensor_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ {
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_SENSOR_UPDATE:
+ {
+ int i;
+ struct scmi_sensor_info *s;
+ const struct scmi_sensor_update_notify_payld *p = payld;
+ struct scmi_sensor_update_report *r = report;
+ struct sensors_info *sinfo = ph->get_priv(ph);
+
+ /* payld_sz is variable for this event */
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ if (r->sensor_id >= sinfo->num_sensors)
+ break;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ s = &sinfo->sensors[r->sensor_id];
+ /*
+ * The generated report r (@struct scmi_sensor_update_report)
+ * was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS
+ * readings: here it is filled with the effective @num_axis
+ * readings defined for this sensor or 1 for scalar sensors.
+ */
+ r->readings_count = s->num_axis ?: 1;
+ for (i = 0; i < r->readings_count; i++)
+ scmi_parse_sensor_readings(&r->readings[i],
+ &p->readings[i]);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int scmi_sensor_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct sensors_info *si = ph->get_priv(ph);
+
+ return si->num_sensors;
+}
+
+static const struct scmi_event sensor_events[] = {
+ {
+ .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
+ .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
+ .max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
+ },
+ {
+ .id = SCMI_EVENT_SENSOR_UPDATE,
+ .max_payld_sz =
+ sizeof(struct scmi_sensor_update_notify_payld) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading_resp),
+ .max_report_sz = sizeof(struct scmi_sensor_update_report) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading),
+ },
+};
+
+static const struct scmi_event_ops sensor_event_ops = {
+ .get_num_sources = scmi_sensor_get_num_sources,
+ .set_notify_enabled = scmi_sensor_set_notify_enabled,
+ .fill_custom_report = scmi_sensor_fill_custom_report,
+};
+
+static const struct scmi_protocol_events sensor_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &sensor_event_ops,
+ .evts = sensor_events,
+ .num_events = ARRAY_SIZE(sensor_events),
+};
+
+static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ u32 version;
+ int ret;
+ struct sensors_info *sinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Sensor Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ sinfo = devm_kzalloc(ph->dev, sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+ sinfo->version = version;
+
+ ret = scmi_sensor_attributes_get(ph, sinfo);
+ if (ret)
+ return ret;
+ sinfo->sensors = devm_kcalloc(ph->dev, sinfo->num_sensors,
+ sizeof(*sinfo->sensors), GFP_KERNEL);
+ if (!sinfo->sensors)
+ return -ENOMEM;
+
+ ret = scmi_sensor_description_get(ph, sinfo);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, sinfo);
+}
+
+static const struct scmi_protocol scmi_sensors = {
+ .id = SCMI_PROTOCOL_SENSOR,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_sensors_protocol_init,
+ .ops = &sensor_proto_ops,
+ .events = &sensor_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors)
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 0000000000..517d52fb3b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using a shared memory based structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/processor.h>
+#include <linux/types.h>
+
+#include <asm-generic/bug.h>
+
+#include "common.h"
+
+/*
+ * SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian
+ * format only.
+ */
+struct scmi_shared_mem {
+ __le32 reserved;
+ __le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
+ __le32 reserved1[2];
+ __le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
+ __le32 length;
+ __le32 msg_header;
+ u8 msg_payload[];
+};
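+
+/*
+ * Overview of the A2P handshake implemented by the helpers below: the agent
+ * waits for CHANNEL_FREE in channel_status, clears channel_status to mark the
+ * channel busy, programs flags/length/msg_header, copies the payload and then
+ * lets the specific shmem-based transport (mailbox, smc, optee) ring its own
+ * doorbell; the platform sets CHANNEL_FREE (or CHANNEL_ERROR) again once its
+ * response has been written, which is what shmem_poll_done() and
+ * shmem_channel_free() check for.
+ */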
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer, struct scmi_chan_info *cinfo)
+{
+ ktime_t stop;
+
+ /*
+ * Ideally the channel must be free by now, unless the OS timed out
+ * the last request while the platform continued to process it: in
+ * that case wait until the platform releases the shared memory,
+ * otherwise we may end up overwriting its response with a new
+ * message payload or vice-versa.
+ * Give up anyway after twice the expected channel timeout so as
+ * not to bail out on intermittent issues where the platform is
+ * occasionally a bit slower to answer.
+ *
+ * Note that after a timeout is detected we bail out and carry on,
+ * but the transport functionality is probably permanently
+ * compromised: this is just to ease debugging and avoid complete
+ * hangs on boot due to misbehaving SCMI firmware.
+ */
+ stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
+ spin_until_cond((ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
+ ktime_after(ktime_get(), stop));
+ if (!(ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
+ WARN_ON_ONCE(1);
+ dev_err(cinfo->dev,
+ "Timeout waiting for a free TX channel !\n");
+ return;
+ }
+
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &shmem->channel_status);
+ iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+ &shmem->flags);
+ iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+ iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+ if (xfer->tx.buf)
+ memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ size_t len = ioread32(&shmem->length);
+
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+ /* Skip the length of header and status in shmem area i.e. 8 bytes */
+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
+
+ /* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ size_t len = ioread32(&shmem->length);
+
+ /* Skip only the length of header in shmem area i.e. 4 bytes */
+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
+
+ /* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ u16 xfer_id;
+
+ xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+ if (xfer->hdr.seq != xfer_id)
+ return false;
+
+ return ioread32(&shmem->channel_status) &
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+ return (ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 0000000000..c193516a25
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/processor.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/*
+ * The shmem address is split into 4K page and offset.
+ * This is to make sure the parameters fit in 32bit arguments of the
+ * smc/hvc call to keep it uniform across smc32/smc64 conventions.
+ * This however limits the shmem address to 44 bits.
+ *
+ * These optional parameters can be used to distinguish among multiple
+ * scmi instances that are using the same smc-id.
+ * The page parameter is passed in r1/x1/w1 register and the offset parameter
+ * is passed in r2/x2/w2 register.
+ */
+
+#define SHMEM_SIZE (SZ_4K)
+#define SHMEM_SHIFT 12
+#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
+#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
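+
+/*
+ * Worked example (hypothetical address, only relevant for the
+ * "arm,scmi-smc-param" compatible): a shmem channel at physical address
+ * 0x880001200 would be advertised as page 0x880001 (the address >> 12)
+ * in r1/x1/w1 and offset 0x200 (the address & 0xfff) in r2/x2/w2.
+ */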
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @irq: An optional IRQ for completion
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
+ * Used when NOT operating in atomic mode.
+ * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
+ * Used when operating in atomic mode.
+ * @func_id: smc/hvc call function id
+ * @param_page: 4K page number of the shmem channel
+ * @param_offset: Offset within the 4K page of the shmem channel
+ */
+
+struct scmi_smc {
+ int irq;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ /* Protect access to shmem area */
+ struct mutex shmem_lock;
+#define INFLIGHT_NONE MSG_TOKEN_MAX
+ atomic_t inflight;
+ u32 func_id;
+ u32 param_page;
+ u32 param_offset;
+};
+
+static irqreturn_t smc_msg_done_isr(int irq, void *data)
+{
+ struct scmi_smc *scmi_info = data;
+
+ scmi_rx_callback(scmi_info->cinfo,
+ shmem_read_header(scmi_info->shmem), NULL);
+
+ return IRQ_HANDLED;
+}
+
+static bool smc_chan_available(struct device_node *of_node, int idx)
+{
+ struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
+
+ if (!np)
+ return false;
+
+ of_node_put(np);
+ return true;
+}
+
+static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_init(&scmi_info->shmem_lock);
+}
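+
+/*
+ * Atomically claim the channel for this transfer by swapping INFLIGHT_NONE
+ * with the transfer sequence number: returns true only for the caller that
+ * won the race; everyone else keeps spinning in smc_channel_lock_acquire().
+ */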
+
+static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
+{
+ int ret;
+
+ ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
+
+ return ret == INFLIGHT_NONE;
+}
+
+static inline void
+smc_channel_lock_acquire(struct scmi_smc *scmi_info,
+ struct scmi_xfer *xfer __maybe_unused)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
+ else
+ mutex_lock(&scmi_info->shmem_lock);
+}
+
+static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
+ atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
+ else
+ mutex_unlock(&scmi_info->shmem_lock);
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct device *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
+ of_node_put(np);
+ return -ENXIO;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = devm_ioremap(dev, res.start, size);
+ if (!scmi_info->shmem) {
+ dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
+ scmi_info->param_page = SHMEM_PAGE(res.start);
+ scmi_info->param_offset = SHMEM_OFFSET(res.start);
+ }
+ /*
+ * If there is an interrupt named "a2p", then the service and
+ * completion of a message is signaled by an interrupt rather than by
+ * the return of the SMC call.
+ */
+ scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
+ if (scmi_info->irq > 0) {
+ ret = request_irq(scmi_info->irq, smc_msg_done_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
+ if (ret) {
+ dev_err(dev, "failed to setup SCMI smc irq\n");
+ return ret;
+ }
+ } else {
+ cinfo->no_completion_irq = true;
+ }
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ smc_channel_lock_init(scmi_info);
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ /* Ignore any possible further reception on the IRQ path */
+ if (scmi_info->irq > 0)
+ free_irq(scmi_info->irq, scmi_info);
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+ unsigned long page = scmi_info->param_page;
+ unsigned long offset = scmi_info->param_offset;
+
+ /*
+ * The channel will be released only once the response has been
+ * fully retrieved, i.e. after .mark_txdone().
+ */
+ smc_channel_lock_acquire(scmi_info, xfer);
+
+ shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0,
+ &res);
+
+ /* SMCCC_RET_NOT_SUPPORTED is the only valid error code */
+ if (res.a0) {
+ smc_channel_lock_release(scmi_info);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *__unused)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ smc_channel_lock_release(scmi_info);
+}
+
+static const struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .mark_txdone = smc_mark_txdone,
+ .fetch_response = smc_fetch_response,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = 128,
+ /*
+ * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
+ * once the SMC instruction has completed successfully, the issued
+ * SCMI command will have been already fully processed by the SCMI
+ * platform firmware and so any possible response value expected
+ * for the issued command will be immediately ready to be fetched
+ * from the shared memory area.
+ */
+ .sync_cmds_completed_on_ret = true,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
+};
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
new file mode 100644
index 0000000000..9383d75845
--- /dev/null
+++ b/drivers/firmware/arm_scmi/system.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) System Power Protocol
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "protocols.h"
+#include "notify.h"
+
+#define SCMI_SYSTEM_NUM_SOURCES 1
+
+enum scmi_system_protocol_cmd {
+ SYSTEM_POWER_STATE_NOTIFY = 0x5,
+};
+
+struct scmi_system_power_state_notify {
+ __le32 notify_enable;
+};
+
+struct scmi_system_power_state_notifier_payld {
+ __le32 agent_id;
+ __le32 flags;
+ __le32 system_state;
+ __le32 timeout;
+};
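+
+/*
+ * The trailing timeout field is only meaningful on platforms implementing
+ * protocol version 2.0 or later (graceful requests): see the
+ * graceful_timeout_supported flag set in scmi_system_protocol_init() and
+ * the expected payload size adjustment in scmi_system_fill_custom_report().
+ */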
+
+struct scmi_system_info {
+ u32 version;
+ bool graceful_timeout_supported;
+};
+
+static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_system_power_state_notify *notify;
+
+ ret = ph->xops->xfer_get_init(ph, SYSTEM_POWER_STATE_NOTIFY,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_system_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_system_request_notify(ph, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *
+scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ size_t expected_sz;
+ const struct scmi_system_power_state_notifier_payld *p = payld;
+ struct scmi_system_power_state_notifier_report *r = report;
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+
+ expected_sz = pinfo->graceful_timeout_supported ?
+ sizeof(*p) : sizeof(*p) - sizeof(__le32);
+ if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
+ payld_sz != expected_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->flags = le32_to_cpu(p->flags);
+ r->system_state = le32_to_cpu(p->system_state);
+ if (pinfo->graceful_timeout_supported &&
+ r->system_state == SCMI_SYSTEM_SHUTDOWN &&
+ SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags))
+ r->timeout = le32_to_cpu(p->timeout);
+ else
+ r->timeout = 0x00;
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event system_events[] = {
+ {
+ .id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ .max_payld_sz =
+ sizeof(struct scmi_system_power_state_notifier_payld),
+ .max_report_sz =
+ sizeof(struct scmi_system_power_state_notifier_report),
+ },
+};
+
+static const struct scmi_event_ops system_event_ops = {
+ .set_notify_enabled = scmi_system_set_notify_enabled,
+ .fill_custom_report = scmi_system_fill_custom_report,
+};
+
+static const struct scmi_protocol_events system_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &system_event_ops,
+ .evts = system_events,
+ .num_events = ARRAY_SIZE(system_events),
+ .num_sources = SCMI_SYSTEM_NUM_SOURCES,
+};
+
+static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ u32 version;
+ struct scmi_system_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "System Power Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ pinfo->version = version;
+ if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
+ pinfo->graceful_timeout_supported = true;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_system = {
+ .id = SCMI_PROTOCOL_SYSTEM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_system_protocol_init,
+ .ops = NULL,
+ .events = &system_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system)
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
new file mode 100644
index 0000000000..d68c01cb7a
--- /dev/null
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtio Transport driver for Arm System Control and Management Interface
+ * (SCMI).
+ *
+ * Copyright (C) 2020-2022 OpenSynergy.
+ * Copyright (C) 2021-2022 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of Operation
+ *
+ * The scmi-virtio transport implements a driver for the virtio SCMI device.
+ *
+ * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
+ * channel (virtio eventq, P2A channel). Each channel is implemented through a
+ * virtqueue. Access to each virtqueue is protected by spinlocks.
+ */
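+
+/*
+ * Note: the cmdq is assumed here to be virtqueue index 0 and the eventq,
+ * when advertised via VIRTIO_SCMI_F_P2A_CHANNELS, index 1, as encoded by
+ * the VIRTIO_SCMI_VQ_TX/VIRTIO_SCMI_VQ_RX constants used below.
+ */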
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_scmi.h>
+
+#include "common.h"
+
+#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
+#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
+#define VIRTIO_SCMI_MAX_PDU_SIZE \
+ (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define DESCRIPTORS_PER_TX_MSG 2
+
+/**
+ * struct scmi_vio_channel - Transport channel information
+ *
+ * @vqueue: Associated virtqueue
+ * @cinfo: SCMI Tx or Rx channel
+ * @free_lock: Protects access to the @free_list.
+ * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @deferred_tx_work: Worker for TX deferred replies processing
+ * @deferred_tx_wq: Workqueue for TX deferred replies
+ * @pending_lock: Protects access to the @pending_cmds_list.
+ * @pending_cmds_list: List of pre-fetched commands queued for later processing
+ * @is_rx: Whether channel is an Rx channel
+ * @max_msg: Maximum number of pending messages for this channel.
+ * @lock: Protects access to all members except users, free_list and
+ * pending_cmds_list.
+ * @shutdown_done: A reference to a completion used when freeing this channel.
+ * @users: A reference count to currently active users of this channel.
+ */
+struct scmi_vio_channel {
+ struct virtqueue *vqueue;
+ struct scmi_chan_info *cinfo;
+ /* lock to protect access to the free list. */
+ spinlock_t free_lock;
+ struct list_head free_list;
+ /* lock to protect access to the pending list. */
+ spinlock_t pending_lock;
+ struct list_head pending_cmds_list;
+ struct work_struct deferred_tx_work;
+ struct workqueue_struct *deferred_tx_wq;
+ bool is_rx;
+ unsigned int max_msg;
+ /*
+ * Lock to protect access to all members except users, free_list and
+ * pending_cmds_list
+ */
+ spinlock_t lock;
+ struct completion *shutdown_done;
+ refcount_t users;
+};
+
+enum poll_states {
+ VIO_MSG_NOT_POLLED,
+ VIO_MSG_POLL_TIMEOUT,
+ VIO_MSG_POLLING,
+ VIO_MSG_POLL_DONE,
+};
+
+/**
+ * struct scmi_vio_msg - Transport PDU information
+ *
+ * @request: SDU used for commands
+ * @input: SDU used for (delayed) responses and notifications
+ * @list: List which scmi_vio_msg may be part of
+ * @rx_len: Input SDU size in bytes, once input has been received
+ * @poll_idx: Last used index registered for polling purposes if this message
+ * transaction reply was configured for polling.
+ * @poll_status: Polling state for this message.
+ * @poll_lock: A lock to protect @poll_status
+ * @users: A reference count to track this message users and avoid premature
+ * freeing (and reuse) when polling and IRQ execution paths interleave.
+ */
+struct scmi_vio_msg {
+ struct scmi_msg_payld *request;
+ struct scmi_msg_payld *input;
+ struct list_head list;
+ unsigned int rx_len;
+ unsigned int poll_idx;
+ enum poll_states poll_status;
+ /* Lock to protect access to poll_status */
+ spinlock_t poll_lock;
+ refcount_t users;
+};
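+
+/*
+ * Message lifecycle in brief: one set of scmi_vio_msg is allocated per
+ * channel in virtio_chan_setup(); TX messages are then cycled through the
+ * per-channel free_list via scmi_virtio_get_free_msg()/scmi_vio_msg_release(),
+ * while RX messages are instead re-armed on the eventq via
+ * scmi_vio_feed_vq_rx().
+ */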
+
+/* Only one SCMI VirtIO device can possibly exist */
+static struct virtio_device *scmi_vdev;
+
+static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
+ struct scmi_chan_info *cinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ cinfo->transport_info = vioch;
+ /* Indirectly mark the channel as not available any more */
+ vioch->cinfo = cinfo;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ refcount_set(&vioch->users, 1);
+}
+
+static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
+{
+ return refcount_inc_not_zero(&vioch->users);
+}
+
+static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
+{
+ if (refcount_dec_and_test(&vioch->users)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (vioch->shutdown_done) {
+ vioch->cinfo = NULL;
+ complete(vioch->shutdown_done);
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ }
+}
+
+static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
+
+ /*
+ * Prepare to wait for the last release if not already released
+ * or in progress.
+ */
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (!vioch->cinfo || vioch->shutdown_done) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ return;
+ }
+
+ vioch->shutdown_done = &vioch_shutdown_done;
+ if (!vioch->is_rx && vioch->deferred_tx_wq)
+ /* Cannot be kicked anymore after this... */
+ vioch->deferred_tx_wq = NULL;
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ scmi_vio_channel_release(vioch);
+
+ /* Let any possibly concurrent RX path release the channel */
+ wait_for_completion(vioch->shutdown_done);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static struct scmi_vio_msg *
+scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
+{
+ unsigned long flags;
+ struct scmi_vio_msg *msg;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ if (list_empty(&vioch->free_list)) {
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ return NULL;
+ }
+
+ msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
+ list_del_init(&msg->list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_NOT_POLLED;
+ refcount_set(&msg->users, 1);
+
+ return msg;
+}
+
+static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
+{
+ return refcount_inc_not_zero(&msg->users);
+}
+
+/* Assumed to be called with the vio channel already acquired */
+static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ bool ret;
+
+ ret = refcount_dec_and_test(&msg->users);
+ if (ret) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vioch->free_lock, flags);
+ list_add_tail(&msg->list, &vioch->free_list);
+ spin_unlock_irqrestore(&vioch->free_lock, flags);
+ }
+
+ return ret;
+}
+
+static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
+{
+ return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
+}
+
+static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ struct scatterlist sg_in;
+ int rc;
+ unsigned long flags;
+ struct device *dev = &vioch->vqueue->vdev->dev;
+
+ sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
+ if (rc)
+ dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
+ else
+ virtqueue_kick(vioch->vqueue);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ return rc;
+}
+
+/*
+ * Assumed to be called with the channel already acquired or not ready at all;
+ * vioch->lock MUST NOT have been already acquired.
+ */
+static void scmi_finalize_message(struct scmi_vio_channel *vioch,
+ struct scmi_vio_msg *msg)
+{
+ if (vioch->is_rx)
+ scmi_vio_feed_vq_rx(vioch, msg);
+ else
+ scmi_vio_msg_release(vioch, msg);
+}
+
+static void scmi_vio_complete_cb(struct virtqueue *vqueue)
+{
+ unsigned long flags;
+ unsigned int length;
+ struct scmi_vio_channel *vioch;
+ struct scmi_vio_msg *msg;
+ bool cb_enabled = true;
+
+ if (WARN_ON_ONCE(!vqueue->vdev->priv))
+ return;
+ vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
+
+ for (;;) {
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ if (cb_enabled) {
+ virtqueue_disable_cb(vqueue);
+ cb_enabled = false;
+ }
+
+ msg = virtqueue_get_buf(vqueue, &length);
+ if (!msg) {
+ if (virtqueue_enable_cb(vqueue)) {
+ spin_unlock_irqrestore(&vioch->lock, flags);
+ scmi_vio_channel_release(vioch);
+ return;
+ }
+ cb_enabled = true;
+ }
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ if (msg) {
+ msg->rx_len = length;
+ scmi_rx_callback(vioch->cinfo,
+ msg_read_header(msg->input), msg);
+
+ scmi_finalize_message(vioch, msg);
+ }
+
+ /*
+ * Release vio channel between loop iterations to allow
+ * virtio_chan_free() to eventually fully release it when
+ * shutting down; in such a case, any outstanding message will
+ * be ignored since this loop will bail out at the next
+ * iteration.
+ */
+ scmi_vio_channel_release(vioch);
+ }
+}
+
+static void scmi_vio_deferred_tx_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch;
+ struct scmi_vio_msg *msg, *tmp;
+
+ vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return;
+
+ /*
+ * Process pre-fetched messages: these could be non-polled messages or
+ * late timed-out replies to polled messages dequeued by chance while
+ * polling for some other messages: this worker is in charge of
+ * processing the valid non-expired messages and, in any case, of
+ * finally freeing all of them.
+ */
+ spin_lock_irqsave(&vioch->pending_lock, flags);
+
+ /* Scan the list of possibly pre-fetched messages during polling. */
+ list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
+ list_del(&msg->list);
+
+ /*
+ * Channel is acquired here (cannot vanish) and this message
+ * is no longer processed elsewhere, so no poll_lock is needed.
+ */
+ if (msg->poll_status == VIO_MSG_NOT_POLLED)
+ scmi_rx_callback(vioch->cinfo,
+ msg_read_header(msg->input), msg);
+
+ /* Free the processed message once done */
+ scmi_vio_msg_release(vioch, msg);
+ }
+
+ spin_unlock_irqrestore(&vioch->pending_lock, flags);
+
+ /* Process possibly still pending messages */
+ scmi_vio_complete_cb(vioch->vqueue);
+
+ scmi_vio_channel_release(vioch);
+}
+
+static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
+
+static vq_callback_t *scmi_vio_complete_callbacks[] = {
+ scmi_vio_complete_cb,
+ scmi_vio_complete_cb
+};
+
+static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
+{
+ struct scmi_vio_channel *vioch = base_cinfo->transport_info;
+
+ return vioch->max_msg;
+}
+
+static int virtio_link_supplier(struct device *dev)
+{
+ if (!scmi_vdev) {
+ dev_notice(dev,
+ "Deferring probe after not finding a bound scmi-virtio device\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (!device_link_add(dev, &scmi_vdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ dev_err(dev, "Adding link to supplier virtio device failed\n");
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static bool virtio_chan_available(struct device_node *of_node, int idx)
+{
+ struct scmi_vio_channel *channels, *vioch = NULL;
+
+ if (WARN_ON_ONCE(!scmi_vdev))
+ return false;
+
+ channels = (struct scmi_vio_channel *)scmi_vdev->priv;
+
+ switch (idx) {
+ case VIRTIO_SCMI_VQ_TX:
+ vioch = &channels[VIRTIO_SCMI_VQ_TX];
+ break;
+ case VIRTIO_SCMI_VQ_RX:
+ if (scmi_vio_have_vq_rx(scmi_vdev))
+ vioch = &channels[VIRTIO_SCMI_VQ_RX];
+ break;
+ default:
+ return false;
+ }
+
+ return vioch && !vioch->cinfo;
+}
+
+static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
+{
+ destroy_workqueue(deferred_tx_wq);
+}
+
+static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct scmi_vio_channel *vioch;
+ int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
+ int i;
+
+ if (!scmi_vdev)
+ return -EPROBE_DEFER;
+
+ vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
+
+ /* Setup a deferred worker for polling. */
+ if (tx && !vioch->deferred_tx_wq) {
+ int ret;
+
+ vioch->deferred_tx_wq =
+ alloc_workqueue(dev_name(&scmi_vdev->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!vioch->deferred_tx_wq)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
+ vioch->deferred_tx_wq);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&vioch->deferred_tx_work,
+ scmi_vio_deferred_tx_worker);
+ }
+
+ for (i = 0; i < vioch->max_msg; i++) {
+ struct scmi_vio_msg *msg;
+
+ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (tx) {
+ msg->request = devm_kzalloc(dev,
+ VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->request)
+ return -ENOMEM;
+ spin_lock_init(&msg->poll_lock);
+ refcount_set(&msg->users, 1);
+ }
+
+ msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
+ GFP_KERNEL);
+ if (!msg->input)
+ return -ENOMEM;
+
+ scmi_finalize_message(vioch, msg);
+ }
+
+ scmi_vio_channel_ready(vioch, cinfo);
+
+ return 0;
+}
+
+static int virtio_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+ /*
+ * Break device to inhibit further traffic flowing while shutting down
+ * the channels: doing it later holding vioch->lock creates unsafe
+ * locking dependency chains as reported by LOCKDEP.
+ */
+ virtio_break_device(vioch->vqueue->vdev);
+ scmi_vio_channel_cleanup_sync(vioch);
+
+ return 0;
+}
+
+static int virtio_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+ struct scatterlist sg_out;
+ struct scatterlist sg_in;
+ struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
+ unsigned long flags;
+ int rc;
+ struct scmi_vio_msg *msg;
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return -EINVAL;
+
+ msg = scmi_virtio_get_free_msg(vioch);
+ if (!msg) {
+ scmi_vio_channel_release(vioch);
+ return -EBUSY;
+ }
+
+ msg_tx_prepare(msg->request, xfer);
+
+ sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
+ sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+
+ spin_lock_irqsave(&vioch->lock, flags);
+
+ /*
+ * If polling was requested for this transaction:
+ * - retrieve last used index (will be used as polling reference)
+ * - bind the polled message to the xfer via .priv
+ * - grab an additional msg refcount for the poll-path
+ */
+ if (xfer->hdr.poll_completion) {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ /* Still no users, no need to acquire poll_lock */
+ msg->poll_status = VIO_MSG_POLLING;
+ scmi_vio_msg_acquire(msg);
+ /* Ensure initialized msg is visibly bound to xfer */
+ smp_store_mb(xfer->priv, msg);
+ }
+
+ rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
+ if (rc)
+ dev_err(vioch->cinfo->dev,
+ "failed to add to TX virtqueue (%d)\n", rc);
+ else
+ virtqueue_kick(vioch->vqueue);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ if (rc) {
+ /* Ensure order between xfer->priv clear and vq feeding */
+ smp_store_mb(xfer->priv, NULL);
+ if (xfer->hdr.poll_completion)
+ scmi_vio_msg_release(vioch, msg);
+ scmi_vio_msg_release(vioch, msg);
+ }
+
+ scmi_vio_channel_release(vioch);
+
+ return rc;
+}
+
+static void virtio_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (msg)
+ msg_fetch_response(msg->input, msg->rx_len, xfer);
+}
+
+static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (msg)
+ msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
+}
+
+/**
+ * virtio_mark_txdone - Mark transmission done
+ *
+ * Free only completed polling transfer messages.
+ *
+ * Note that in the SCMI VirtIO transport we never explicitly release still
+ * outstanding but timed-out messages by forcibly re-adding them to the
+ * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
+ * TX deferred worker, eventually clean up such messages once a late reply is
+ * finally received and discarded (if ever).
+ *
+ * This approach was deemed preferable since those pending timed-out buffers are
+ * still effectively owned by the SCMI platform VirtIO device even after timeout
+ * expiration: forcibly freeing and reusing them before they had been returned
+ * explicitly by the SCMI platform could lead to subtle bugs due to message
+ * corruption.
+ * An SCMI platform VirtIO device which never returns message buffers is
+ * anyway broken and it will quickly lead to exhaustion of available messages.
+ *
+ * For this same reason, here, we take care to free only the polled messages
+ * that had been somehow replied (only if not by chance already processed on the
+ * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also
+ * any timed-out polled message if that indeed appears to have been at least
+ * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such
+ * messages won't be freed elsewhere. Any other polled message is marked as
+ * VIO_MSG_POLL_TIMEOUT.
+ *
+ * Possible late replies to timed-out polled messages will be eventually freed
+ * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
+ * dequeued on some other polling path.
+ *
+ * @cinfo: SCMI channel info
+ * @ret: Transmission return code
+ * @xfer: Transfer descriptor
+ */
+static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ struct scmi_xfer *xfer)
+{
+ unsigned long flags;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+ struct scmi_vio_msg *msg = xfer->priv;
+
+ if (!msg || !scmi_vio_channel_acquire(vioch))
+ return;
+
+ /* Ensure msg is unbound from xfer anyway at this point */
+ smp_store_mb(xfer->priv, NULL);
+
+ /* Must be a polled xfer and not already freed on the IRQ path */
+ if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
+ scmi_vio_channel_release(vioch);
+ return;
+ }
+
+ spin_lock_irqsave(&msg->poll_lock, flags);
+ /* Free the polled message unless it timed out while still in flight */
+ if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
+ scmi_vio_msg_release(vioch, msg);
+ else if (msg->poll_status == VIO_MSG_POLLING)
+ msg->poll_status = VIO_MSG_POLL_TIMEOUT;
+ spin_unlock_irqrestore(&msg->poll_lock, flags);
+
+ scmi_vio_channel_release(vioch);
+}
+
+/**
+ * virtio_poll_done - Provide polling support for VirtIO transport
+ *
+ * @cinfo: SCMI channel info
+ * @xfer: Reference to the transfer being polled for.
+ *
+ * VirtIO core provides a polling mechanism based only on last used indexes:
+ * this means that it is possible to poll the virtqueues waiting for something
+ * new to arrive from the host side, but the only way to check if the freshly
+ * arrived buffer was indeed what we were waiting for is to compare the newly
+ * arrived message descriptor with the one we are polling on.
+ *
+ * As a consequence we may end up dequeueing something different from the buffer
+ * we were poll-waiting for: if that is the case, such early fetched buffers are
+ * then added to the @pending_cmds_list for later processing by a dedicated
+ * deferred worker.
+ *
+ * So, basically, once something new is spotted we proceed to dequeue all the
+ * freshly received used buffers until we find the one we were polling on, or
+ * until we have 'seemingly' emptied the virtqueue; if some buffers are still
+ * pending in the vqueue at the end of the polling loop (possible due to
+ * inherent races in the virtqueue handling mechanisms), we similarly kick the
+ * deferred worker and let it process those, to avoid indefinitely looping in
+ * the .poll_done busy-waiting helper.
+ *
+ * Finally, we delegate to the deferred worker also the final free of any timed
+ * out reply to a polled message that we should dequeue.
+ *
+ * Note that, since we do NOT have per-message suppress notification mechanism,
+ * the message we are polling for could be alternatively delivered via usual
+ * IRQs callbacks on another core which happened to have IRQs enabled while we
+ * are actively polling for it here: in such a case it will be handled as such
+ * by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be
+ * transparently terminated anyway.
+ *
+ * Return: True once polling has successfully completed.
+ */
+static bool virtio_poll_done(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ bool pending, found = false;
+ unsigned int length, any_prefetched = 0;
+ unsigned long flags;
+ struct scmi_vio_msg *next_msg, *msg = xfer->priv;
+ struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+ if (!msg)
+ return true;
+
+ /*
+ * Already processed by another polling loop on another CPU?
+ *
+ * Note that this message is acquired on the poll path so it cannot
+ * vanish while inside this loop iteration, even if concurrently
+ * processed on the IRQ path.
+ *
+ * Avoid acquiring poll_lock since poll_status can be changed
+ * in a relevant manner only later in this same thread of execution:
+ * any other possible changes made concurrently by other polling loops
+ * or by a reply delivered on the IRQ path have no meaningful impact on
+ * this loop iteration: in other words it is harmless to allow this
+ * possible race but let us avoid spinlocking with irqs off in this
+ * initial part of the polling loop.
+ */
+ if (msg->poll_status == VIO_MSG_POLL_DONE)
+ return true;
+
+ if (!scmi_vio_channel_acquire(vioch))
+ return true;
+
+ /* Has the cmdq index moved at all? */
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ if (!pending) {
+ scmi_vio_channel_release(vioch);
+ return false;
+ }
+
+ spin_lock_irqsave(&vioch->lock, flags);
+ virtqueue_disable_cb(vioch->vqueue);
+
+ /*
+ * Process all new messages till the polled-for message is found OR
+ * the vqueue is empty.
+ */
+ while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
+ bool next_msg_done = false;
+
+ /*
+ * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
+ * that it can be properly freed even on timeout in mark_txdone.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_POLLING) {
+ next_msg->poll_status = VIO_MSG_POLL_DONE;
+ next_msg_done = true;
+ }
+ spin_unlock(&next_msg->poll_lock);
+
+ next_msg->rx_len = length;
+ /* Is this the message we were polling for? */
+ if (next_msg == msg) {
+ found = true;
+ break;
+ } else if (next_msg_done) {
+ /* Skip the rest if this was another polled msg */
+ continue;
+ }
+
+ /*
+ * Enqueue for later processing any non-polled message and any
+ * timed-out polled one that we happen to have dequeued.
+ */
+ spin_lock(&next_msg->poll_lock);
+ if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
+ next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
+ spin_unlock(&next_msg->poll_lock);
+
+ any_prefetched++;
+ spin_lock(&vioch->pending_lock);
+ list_add_tail(&next_msg->list,
+ &vioch->pending_cmds_list);
+ spin_unlock(&vioch->pending_lock);
+ } else {
+ spin_unlock(&next_msg->poll_lock);
+ }
+ }
+
+ /*
+ * When the polling loop has successfully terminated, anything else
+ * that was queued in the meantime will be served by the deferred
+ * worker OR by the normal IRQ callback OR by other poll loops.
+ *
+ * If we are still looking for the polled reply, the polling index has
+ * to be updated to the current vqueue last used index.
+ */
+ if (found) {
+ pending = !virtqueue_enable_cb(vioch->vqueue);
+ } else {
+ msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
+ pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
+ }
+
+ if (vioch->deferred_tx_wq && (any_prefetched || pending))
+ queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);
+
+ spin_unlock_irqrestore(&vioch->lock, flags);
+
+ scmi_vio_channel_release(vioch);
+
+ return found;
+}
+
+static const struct scmi_transport_ops scmi_virtio_ops = {
+ .link_supplier = virtio_link_supplier,
+ .chan_available = virtio_chan_available,
+ .chan_setup = virtio_chan_setup,
+ .chan_free = virtio_chan_free,
+ .get_max_msg = virtio_get_max_msg,
+ .send_message = virtio_send_message,
+ .fetch_response = virtio_fetch_response,
+ .fetch_notification = virtio_fetch_notification,
+ .mark_txdone = virtio_mark_txdone,
+ .poll_done = virtio_poll_done,
+};
+
+static int scmi_vio_probe(struct virtio_device *vdev)
+{
+ struct device *dev = &vdev->dev;
+ struct scmi_vio_channel *channels;
+ bool have_vq_rx;
+ int vq_cnt;
+ int i;
+ int ret;
+ struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
+
+ /* Only one SCMI VirtIO device allowed */
+ if (scmi_vdev) {
+ dev_err(dev,
+ "One SCMI Virtio device was already initialized: only one allowed.\n");
+ return -EBUSY;
+ }
+
+ have_vq_rx = scmi_vio_have_vq_rx(vdev);
+ vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
+
+ channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ if (have_vq_rx)
+ channels[VIRTIO_SCMI_VQ_RX].is_rx = true;
+
+ ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
+ scmi_vio_vqueue_names, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
+ return ret;
+ }
+
+ for (i = 0; i < vq_cnt; i++) {
+ unsigned int sz;
+
+ spin_lock_init(&channels[i].lock);
+ spin_lock_init(&channels[i].free_lock);
+ INIT_LIST_HEAD(&channels[i].free_list);
+ spin_lock_init(&channels[i].pending_lock);
+ INIT_LIST_HEAD(&channels[i].pending_cmds_list);
+ channels[i].vqueue = vqs[i];
+
+ sz = virtqueue_get_vring_size(channels[i].vqueue);
+ /* Tx messages need multiple descriptors. */
+ if (!channels[i].is_rx)
+ sz /= DESCRIPTORS_PER_TX_MSG;
+
+ if (sz > MSG_TOKEN_MAX) {
+ dev_info(dev,
+ "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
+ channels[i].is_rx ? "rx" : "tx",
+ sz, MSG_TOKEN_MAX);
+ sz = MSG_TOKEN_MAX;
+ }
+ channels[i].max_msg = sz;
+ }
+
+ vdev->priv = channels;
+ /* Ensure initialized scmi_vdev is visible */
+ smp_store_mb(scmi_vdev, vdev);
+
+ return 0;
+}
+
+static void scmi_vio_remove(struct virtio_device *vdev)
+{
+ /*
+ * Once we get here, virtio_chan_free() will have already been called by
+ * the SCMI core for any existing channel and, as a consequence, all the
+ * virtio channels will have been already marked NOT ready, causing any
+ * outstanding message on any vqueue to be ignored by complete_cb: now
+ * we can just stop processing buffers and destroy the vqueues.
+ */
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+ /* Ensure scmi_vdev is visible as NULL */
+ smp_store_mb(scmi_vdev, NULL);
+}
+
+static int scmi_vio_validate(struct virtio_device *vdev)
+{
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ dev_err(&vdev->dev,
+ "device does not comply with spec version 1.x\n");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static unsigned int features[] = {
+ VIRTIO_SCMI_F_P2A_CHANNELS,
+};
+
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
+ { 0 }
+};
+
+static struct virtio_driver virtio_scmi_driver = {
+ .driver.name = "scmi-virtio",
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = scmi_vio_probe,
+ .remove = scmi_vio_remove,
+ .validate = scmi_vio_validate,
+};
+
+static int __init virtio_scmi_init(void)
+{
+ return register_virtio_driver(&virtio_scmi_driver);
+}
+
+static void virtio_scmi_exit(void)
+{
+ unregister_virtio_driver(&virtio_scmi_driver);
+}
+
+const struct scmi_desc scmi_virtio_desc = {
+ .transport_init = virtio_scmi_init,
+ .transport_exit = virtio_scmi_exit,
+ .ops = &scmi_virtio_ops,
+ /* for non-realtime virtio devices */
+ .max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
+ .max_msg = 0, /* overridden by virtio_get_max_msg() */
+ .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+ .atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
+};
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
new file mode 100644
index 0000000000..eaa8d94492
--- /dev/null
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Voltage Protocol
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/scmi_protocol.h>
+
+#include "protocols.h"
+
+#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
+#define REMAINING_LEVELS_MASK GENMASK(31, 16)
+#define RETURNED_LEVELS_MASK GENMASK(11, 0)
+
+enum scmi_voltage_protocol_cmd {
+ VOLTAGE_DOMAIN_ATTRIBUTES = 0x3,
+ VOLTAGE_DESCRIBE_LEVELS = 0x4,
+ VOLTAGE_CONFIG_SET = 0x5,
+ VOLTAGE_CONFIG_GET = 0x6,
+ VOLTAGE_LEVEL_SET = 0x7,
+ VOLTAGE_LEVEL_GET = 0x8,
+ VOLTAGE_DOMAIN_NAME_GET = 0x09,
+};
+
+#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
+
+struct scmi_msg_resp_domain_attributes {
+ __le32 attr;
+#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_msg_cmd_describe_levels {
+ __le32 domain_id;
+ __le32 level_index;
+};
+
+struct scmi_msg_resp_describe_levels {
+ __le32 flags;
+#define NUM_REMAINING_LEVELS(f) ((u16)(FIELD_GET(REMAINING_LEVELS_MASK, (f))))
+#define NUM_RETURNED_LEVELS(f) ((u16)(FIELD_GET(RETURNED_LEVELS_MASK, (f))))
+#define SUPPORTS_SEGMENTED_LEVELS(f) ((f) & BIT(12))
+ __le32 voltage[];
+};
+
+struct scmi_msg_cmd_config_set {
+ __le32 domain_id;
+ __le32 config;
+};
+
+struct scmi_msg_cmd_level_set {
+ __le32 domain_id;
+ __le32 flags;
+ __le32 voltage_level;
+};
+
+struct scmi_resp_voltage_level_set_complete {
+ __le32 domain_id;
+ __le32 voltage_level;
+};
+
+struct voltage_info {
+ unsigned int version;
+ unsigned int num_domains;
+ struct scmi_voltage_info *domains;
+};
+
+static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct voltage_info *vinfo)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ vinfo->num_domains =
+ NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf));
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_init_voltage_levels(struct device *dev,
+ struct scmi_voltage_info *v,
+ u32 num_returned, u32 num_remaining,
+ bool segmented)
+{
+ u32 num_levels;
+
+ num_levels = num_returned + num_remaining;
+	/*
+	 * A segmented level range is described by a single triplet
+	 * (lowest level, highest level, step) returned all in one go.
+	 */
+ if (!num_levels ||
+ (segmented && (num_remaining || num_returned != 3))) {
+ dev_err(dev,
+ "Invalid level descriptor(%d/%d/%d) for voltage dom %d\n",
+ num_levels, num_returned, num_remaining, v->id);
+ return -EINVAL;
+ }
+
+ v->levels_uv = devm_kcalloc(dev, num_levels, sizeof(u32), GFP_KERNEL);
+ if (!v->levels_uv)
+ return -ENOMEM;
+
+ v->num_levels = num_levels;
+ v->segmented = segmented;
+
+ return 0;
+}
+
+struct scmi_volt_ipriv {
+ struct device *dev;
+ struct scmi_voltage_info *v;
+};
+
+static void iter_volt_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_cmd_describe_levels *msg = message;
+ const struct scmi_volt_ipriv *p = priv;
+
+ msg->domain_id = cpu_to_le32(p->v->id);
+ msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_volt_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ int ret = 0;
+ u32 flags;
+ const struct scmi_msg_resp_describe_levels *r = response;
+ struct scmi_volt_ipriv *p = priv;
+
+ flags = le32_to_cpu(r->flags);
+ st->num_returned = NUM_RETURNED_LEVELS(flags);
+ st->num_remaining = NUM_REMAINING_LEVELS(flags);
+
+ /* Allocate space for num_levels if not already done */
+ if (!p->v->num_levels) {
+ ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned,
+ st->num_remaining,
+ SUPPORTS_SEGMENTED_LEVELS(flags));
+ if (!ret)
+ st->max_resources = p->v->num_levels;
+ }
+
+ return ret;
+}
+
+static int
+iter_volt_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ s32 val;
+ const struct scmi_msg_resp_describe_levels *r = response;
+ struct scmi_volt_ipriv *p = priv;
+
+ val = (s32)le32_to_cpu(r->voltage[st->loop_idx]);
+ p->v->levels_uv[st->desc_index + st->loop_idx] = val;
+ if (val < 0)
+ p->v->negative_volts_allowed = true;
+
+ return 0;
+}
+
+static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
+ struct scmi_voltage_info *v)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_volt_levels_prepare_message,
+ .update_state = iter_volt_levels_update_state,
+ .process_response = iter_volt_levels_process_response,
+ };
+ struct scmi_volt_ipriv vpriv = {
+ .dev = ph->dev,
+ .v = v,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
+ VOLTAGE_DESCRIBE_LEVELS,
+ sizeof(struct scmi_msg_cmd_describe_levels),
+ &vpriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret) {
+ v->num_levels = 0;
+ devm_kfree(ph->dev, v->levels_uv);
+ }
+
+ return ret;
+}
+
+static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
+ struct voltage_info *vinfo)
+{
+ int ret, dom;
+ struct scmi_xfer *td;
+ struct scmi_msg_resp_domain_attributes *resp_dom;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
+ sizeof(__le32), sizeof(*resp_dom), &td);
+ if (ret)
+ return ret;
+ resp_dom = td->rx.buf;
+
+ for (dom = 0; dom < vinfo->num_domains; dom++) {
+ u32 attributes;
+ struct scmi_voltage_info *v;
+
+ /* Retrieve domain attributes at first ... */
+ put_unaligned_le32(dom, td->tx.buf);
+ /* Skip domain on comms error */
+ if (ph->xops->do_xfer(ph, td))
+ continue;
+
+ v = vinfo->domains + dom;
+ v->id = dom;
+ attributes = le32_to_cpu(resp_dom->attr);
+ strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) {
+ if (SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph,
+ VOLTAGE_DOMAIN_NAME_GET,
+ v->id, v->name,
+ SCMI_MAX_STR_SIZE);
+ if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
+ v->async_level_set = true;
+ }
+
+		/* Errors are ignored: domains with no valid levels are skipped later */
+		scmi_voltage_levels_get(ph, v);
+ }
+
+ ph->xops->xfer_put(ph, td);
+
+ return ret;
+}
+
+static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph,
+ u8 cmd_id, u32 domain_id, u32 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain_id, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le32(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+ struct scmi_msg_cmd_config_set *cmd;
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET,
+ sizeof(*cmd), 0, &t);
+ if (ret)
+ return ret;
+
+ cmd = t->tx.buf;
+ cmd->domain_id = cpu_to_le32(domain_id);
+ cmd->config = cpu_to_le32(config & GENMASK(3, 0));
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *config)
+{
+ return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET,
+ domain_id, config);
+}
+
+static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id,
+ enum scmi_voltage_level_mode mode,
+ s32 volt_uV)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct voltage_info *vinfo = ph->get_priv(ph);
+ struct scmi_msg_cmd_level_set *cmd;
+ struct scmi_voltage_info *v;
+
+ if (domain_id >= vinfo->num_domains)
+ return -EINVAL;
+
+ ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET,
+ sizeof(*cmd), 0, &t);
+ if (ret)
+ return ret;
+
+ v = vinfo->domains + domain_id;
+
+ cmd = t->tx.buf;
+ cmd->domain_id = cpu_to_le32(domain_id);
+ cmd->voltage_level = cpu_to_le32(volt_uV);
+
+ if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) {
+ cmd->flags = cpu_to_le32(0x0);
+ ret = ph->xops->do_xfer(ph, t);
+ } else {
+ cmd->flags = cpu_to_le32(0x1);
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_resp_voltage_level_set_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->domain_id) == domain_id)
+ dev_dbg(ph->dev,
+ "Voltage domain %d set async to %d\n",
+ v->id,
+ le32_to_cpu(resp->voltage_level));
+ else
+ ret = -EPROTO;
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, s32 *volt_uV)
+{
+ return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET,
+ domain_id, (u32 *)volt_uV);
+}
+
+static const struct scmi_voltage_info * __must_check
+scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
+{
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ if (domain_id >= vinfo->num_domains ||
+ !vinfo->domains[domain_id].num_levels)
+ return NULL;
+
+ return vinfo->domains + domain_id;
+}
+
+static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
+{
+ struct voltage_info *vinfo = ph->get_priv(ph);
+
+ return vinfo->num_domains;
+}
+
+static struct scmi_voltage_proto_ops voltage_proto_ops = {
+ .num_domains_get = scmi_voltage_domains_num_get,
+ .info_get = scmi_voltage_info_get,
+ .config_set = scmi_voltage_config_set,
+ .config_get = scmi_voltage_config_get,
+ .level_set = scmi_voltage_level_set,
+ .level_get = scmi_voltage_level_get,
+};
+
+static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int ret;
+ u32 version;
+ struct voltage_info *vinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Voltage Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ vinfo = devm_kzalloc(ph->dev, sizeof(*vinfo), GFP_KERNEL);
+ if (!vinfo)
+ return -ENOMEM;
+ vinfo->version = version;
+
+ ret = scmi_protocol_attributes_get(ph, vinfo);
+ if (ret)
+ return ret;
+
+ if (vinfo->num_domains) {
+ vinfo->domains = devm_kcalloc(ph->dev, vinfo->num_domains,
+ sizeof(*vinfo->domains),
+ GFP_KERNEL);
+ if (!vinfo->domains)
+ return -ENOMEM;
+ ret = scmi_voltage_descriptors_get(ph, vinfo);
+ if (ret)
+ return ret;
+ } else {
+ dev_warn(ph->dev, "No Voltage domains found.\n");
+ }
+
+ return ph->set_priv(ph, vinfo);
+}
+
+static const struct scmi_protocol scmi_voltage = {
+ .id = SCMI_PROTOCOL_VOLTAGE,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_voltage_protocol_init,
+ .ops = &voltage_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
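As a usage illustration, a minimal, hypothetical consumer of the voltage protocol ops registered above might look like the sketch below; it assumes the usual devm_protocol_get() accessor on the SCMI handle and is not part of the patch.

/* Hypothetical SCMI driver probe: pick domain 0 and set its lowest level */
static int example_probe(struct scmi_device *sdev)
{
	const struct scmi_voltage_proto_ops *vops;
	const struct scmi_voltage_info *dom;
	struct scmi_protocol_handle *ph;

	vops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_VOLTAGE, &ph);
	if (IS_ERR(vops))
		return PTR_ERR(vops);

	dom = vops->info_get(ph, 0);
	if (!dom)
		return -ENODEV;

	/* AUTO mode uses the async flavour only if the domain supports it */
	return vops->level_set(ph, dom->id, SCMI_VOLTAGE_LEVEL_SET_AUTO,
			       dom->levels_uv[0]);
}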
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
new file mode 100644
index 0000000000..435d0e2658
--- /dev/null
+++ b/drivers/firmware/arm_scpi.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Control and Power Interface (SCPI) Message Protocol driver
+ *
+ * The SCPI Message Protocol is used between the System Control Processor (SCP)
+ * and the Application Processors (AP). The Message Handling Unit (MHU)
+ * provides a mechanism for inter-processor communication between the SCP's
+ * Cortex-M3 and the AP.
+ *
+ * The SCP offers control and management of core/cluster power states,
+ * DVFS for various power domains including the cores/clusters, configuration
+ * of certain system clocks, thermal sensors and many others.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/printk.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/spinlock.h>
+
+#define CMD_ID_MASK GENMASK(6, 0)
+#define CMD_TOKEN_ID_MASK GENMASK(15, 8)
+#define CMD_DATA_SIZE_MASK GENMASK(24, 16)
+#define CMD_LEGACY_DATA_SIZE_MASK GENMASK(28, 20)
+#define PACK_SCPI_CMD(cmd_id, tx_sz) \
+ (FIELD_PREP(CMD_ID_MASK, cmd_id) | \
+ FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
+ (FIELD_PREP(CMD_ID_MASK, cmd_id) | \
+ FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz))
+
+#define CMD_SIZE(cmd) FIELD_GET(CMD_DATA_SIZE_MASK, cmd)
+#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK | CMD_ID_MASK)
+#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
+
+#define SCPI_SLOT 0
+
+#define MAX_DVFS_DOMAINS 8
+#define MAX_DVFS_OPPS 16
+
+#define PROTO_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTO_REV_MINOR_MASK GENMASK(15, 0)
+
+#define FW_REV_MAJOR_MASK GENMASK(31, 24)
+#define FW_REV_MINOR_MASK GENMASK(23, 16)
+#define FW_REV_PATCH_MASK GENMASK(15, 0)
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
+
+enum scpi_error_codes {
+ SCPI_SUCCESS = 0, /* Success */
+ SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
+ SCPI_ERR_ALIGN = 2, /* Invalid alignment */
+ SCPI_ERR_SIZE = 3, /* Invalid size */
+ SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
+ SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
+ SCPI_ERR_RANGE = 6, /* Value out of range */
+ SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
+ SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
+ SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
+ SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
+ SCPI_ERR_DEVICE = 11, /* Device error */
+ SCPI_ERR_BUSY = 12, /* Device busy */
+ SCPI_ERR_MAX
+};
+
+/* SCPI Standard commands */
+enum scpi_std_cmd {
+ SCPI_CMD_INVALID = 0x00,
+ SCPI_CMD_SCPI_READY = 0x01,
+ SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ SCPI_CMD_SET_CSS_PWR_STATE = 0x03,
+ SCPI_CMD_GET_CSS_PWR_STATE = 0x04,
+ SCPI_CMD_SET_SYS_PWR_STATE = 0x05,
+ SCPI_CMD_SET_CPU_TIMER = 0x06,
+ SCPI_CMD_CANCEL_CPU_TIMER = 0x07,
+ SCPI_CMD_DVFS_CAPABILITIES = 0x08,
+ SCPI_CMD_GET_DVFS_INFO = 0x09,
+ SCPI_CMD_SET_DVFS = 0x0a,
+ SCPI_CMD_GET_DVFS = 0x0b,
+ SCPI_CMD_GET_DVFS_STAT = 0x0c,
+ SCPI_CMD_CLOCK_CAPABILITIES = 0x0d,
+ SCPI_CMD_GET_CLOCK_INFO = 0x0e,
+ SCPI_CMD_SET_CLOCK_VALUE = 0x0f,
+ SCPI_CMD_GET_CLOCK_VALUE = 0x10,
+ SCPI_CMD_PSU_CAPABILITIES = 0x11,
+ SCPI_CMD_GET_PSU_INFO = 0x12,
+ SCPI_CMD_SET_PSU = 0x13,
+ SCPI_CMD_GET_PSU = 0x14,
+ SCPI_CMD_SENSOR_CAPABILITIES = 0x15,
+ SCPI_CMD_SENSOR_INFO = 0x16,
+ SCPI_CMD_SENSOR_VALUE = 0x17,
+ SCPI_CMD_SENSOR_CFG_PERIODIC = 0x18,
+ SCPI_CMD_SENSOR_CFG_BOUNDS = 0x19,
+ SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1a,
+ SCPI_CMD_SET_DEVICE_PWR_STATE = 0x1b,
+ SCPI_CMD_GET_DEVICE_PWR_STATE = 0x1c,
+ SCPI_CMD_COUNT
+};
+
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+ LEGACY_SCPI_CMD_INVALID = 0x00,
+ LEGACY_SCPI_CMD_SCPI_READY = 0x01,
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ LEGACY_SCPI_CMD_EVENT = 0x03,
+ LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
+ LEGACY_SCPI_CMD_L2_READY = 0x09,
+ LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
+ LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
+ LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
+ LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
+ LEGACY_SCPI_CMD_SET_RTC = 0x11,
+ LEGACY_SCPI_CMD_GET_RTC = 0x12,
+ LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ LEGACY_SCPI_CMD_SET_PSU = 0x18,
+ LEGACY_SCPI_CMD_GET_PSU = 0x19,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
+ LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_RTC,
+ LEGACY_SCPI_CMD_GET_RTC,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_PSU,
+ LEGACY_SCPI_CMD_GET_PSU,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+ CMD_SCPI_CAPABILITIES = 0,
+ CMD_GET_CLOCK_INFO,
+ CMD_GET_CLOCK_VALUE,
+ CMD_SET_CLOCK_VALUE,
+ CMD_GET_DVFS,
+ CMD_SET_DVFS,
+ CMD_GET_DVFS_INFO,
+ CMD_SENSOR_CAPABILITIES,
+ CMD_SENSOR_INFO,
+ CMD_SENSOR_VALUE,
+ CMD_SET_DEVICE_PWR_STATE,
+ CMD_GET_DEVICE_PWR_STATE,
+ CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+ SCPI_CMD_SCPI_CAPABILITIES,
+ SCPI_CMD_GET_CLOCK_INFO,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS_INFO,
+ SCPI_CMD_SENSOR_CAPABILITIES,
+ SCPI_CMD_SENSOR_INFO,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SET_DEVICE_PWR_STATE,
+ SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+ -1, /* GET_CLOCK_INFO */
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+ LEGACY_SCPI_CMD_SENSOR_INFO,
+ LEGACY_SCPI_CMD_SENSOR_VALUE,
+ -1, /* SET_DEVICE_PWR_STATE */
+ -1, /* GET_DEVICE_PWR_STATE */
+};
+
+struct scpi_xfer {
+ u32 slot; /* has to be first element */
+ u32 cmd;
+ u32 status;
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ struct list_head node;
+ struct completion done;
+};
+
+struct scpi_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ void __iomem *tx_payload;
+ void __iomem *rx_payload;
+ struct list_head rx_pending;
+ struct list_head xfers_list;
+ struct scpi_xfer *xfers;
+ spinlock_t rx_lock; /* locking for the rx pending list */
+ struct mutex xfers_lock;
+ u8 token;
+};
+
+struct scpi_drvinfo {
+ u32 protocol_version;
+ u32 firmware_version;
+ bool is_legacy;
+ int num_chans;
+ int *commands;
+ DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
+ atomic_t next_chan;
+ struct scpi_ops *scpi_ops;
+ struct scpi_chan *channels;
+ struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
+};
+
+/*
+ * The SCP firmware only executes in little-endian mode, so any buffers
+ * shared through SCPI should have their contents converted to little-endian
+ */
+struct scpi_shared_mem {
+ __le32 command;
+ __le32 status;
+ u8 payload[];
+} __packed;
+
+struct legacy_scpi_shared_mem {
+ __le32 status;
+ u8 payload[];
+} __packed;
+
+struct scp_capabilities {
+ __le32 protocol_version;
+ __le32 event_version;
+ __le32 platform_version;
+ __le32 commands[4];
+} __packed;
+
+struct clk_get_info {
+ __le16 id;
+ __le16 flags;
+ __le32 min_rate;
+ __le32 max_rate;
+ u8 name[20];
+} __packed;
+
+struct clk_set_value {
+ __le16 id;
+ __le16 reserved;
+ __le32 rate;
+} __packed;
+
+struct legacy_clk_set_value {
+ __le32 rate;
+ __le16 id;
+ __le16 reserved;
+} __packed;
+
+struct dvfs_info {
+ u8 domain;
+ u8 opp_count;
+ __le16 latency;
+ struct {
+ __le32 freq;
+ __le32 m_volt;
+ } opps[MAX_DVFS_OPPS];
+} __packed;
+
+struct dvfs_set {
+ u8 domain;
+ u8 index;
+} __packed;
+
+struct _scpi_sensor_info {
+ __le16 sensor_id;
+ u8 class;
+ u8 trigger_type;
+ char name[20];
+};
+
+struct dev_pstate_set {
+ __le16 dev_id;
+ u8 pstate;
+} __packed;
+
+static struct scpi_drvinfo *scpi_info;
+
+static int scpi_linux_errmap[SCPI_ERR_MAX] = {
+	/* better than a switch statement as long as the error codes are contiguous */
+ 0, /* SCPI_SUCCESS */
+ -EINVAL, /* SCPI_ERR_PARAM */
+ -ENOEXEC, /* SCPI_ERR_ALIGN */
+ -EMSGSIZE, /* SCPI_ERR_SIZE */
+ -EINVAL, /* SCPI_ERR_HANDLER */
+ -EACCES, /* SCPI_ERR_ACCESS */
+ -ERANGE, /* SCPI_ERR_RANGE */
+ -ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
+ -ENOMEM, /* SCPI_ERR_NOMEM */
+ -EINVAL, /* SCPI_ERR_PWRSTATE */
+ -EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
+ -EIO, /* SCPI_ERR_DEVICE */
+ -EBUSY, /* SCPI_ERR_BUSY */
+};
+
+static inline int scpi_to_linux_errno(int errno)
+{
+ if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
+ return scpi_linux_errmap[errno];
+ return -EIO;
+}
+
+static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
+{
+ unsigned long flags;
+ struct scpi_xfer *t, *match = NULL;
+
+ spin_lock_irqsave(&ch->rx_lock, flags);
+ if (list_empty(&ch->rx_pending)) {
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+ return;
+ }
+
+	/*
+	 * The command type is not echoed back by the SCP firmware in legacy
+	 * mode, so treat the head of the pending RX list as the matching
+	 * command if the list is not empty. In TX-only mode, the list would
+	 * be empty.
+	 */
+ if (scpi_info->is_legacy) {
+ match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+ node);
+ list_del(&match->node);
+ } else {
+ list_for_each_entry(t, &ch->rx_pending, node)
+ if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+ list_del(&t->node);
+ match = t;
+ break;
+ }
+ }
+ /* check if wait_for_completion is in progress or timed-out */
+ if (match && !completion_done(&match->done)) {
+ unsigned int len;
+
+ if (scpi_info->is_legacy) {
+ struct legacy_scpi_shared_mem __iomem *mem =
+ ch->rx_payload;
+
+ /* RX Length is not replied by the legacy Firmware */
+ len = match->rx_len;
+
+ match->status = ioread32(&mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ } else {
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+
+ len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd));
+
+ match->status = ioread32(&mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ }
+
+ if (match->rx_len > len)
+ memset(match->rx_buf + len, 0, match->rx_len - len);
+ complete(&match->done);
+ }
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+}
+
+static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
+{
+ struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+ u32 cmd = 0;
+
+ if (!scpi_info->is_legacy)
+ cmd = ioread32(&mem->command);
+
+ scpi_process_cmd(ch, cmd);
+}
+
+static void scpi_tx_prepare(struct mbox_client *c, void *msg)
+{
+ unsigned long flags;
+ struct scpi_xfer *t = msg;
+ struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+ struct scpi_shared_mem __iomem *mem = ch->tx_payload;
+
+ if (t->tx_buf) {
+ if (scpi_info->is_legacy)
+ memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+ else
+ memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ }
+
+ if (t->rx_buf) {
+ if (!(++ch->token))
+ ++ch->token;
+ t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
+ spin_lock_irqsave(&ch->rx_lock, flags);
+ list_add_tail(&t->node, &ch->rx_pending);
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+ }
+
+ if (!scpi_info->is_legacy)
+ iowrite32(t->cmd, &mem->command);
+}
+
+static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
+{
+ struct scpi_xfer *t;
+
+ mutex_lock(&ch->xfers_lock);
+ if (list_empty(&ch->xfers_list)) {
+ mutex_unlock(&ch->xfers_lock);
+ return NULL;
+ }
+ t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
+ list_del(&t->node);
+ mutex_unlock(&ch->xfers_lock);
+ return t;
+}
+
+static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
+{
+ mutex_lock(&ch->xfers_lock);
+ list_add_tail(&t->node, &ch->xfers_list);
+ mutex_unlock(&ch->xfers_lock);
+}
+
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
+ void *rx_buf, unsigned int rx_len)
+{
+ int ret;
+ u8 chan;
+ u8 cmd;
+ struct scpi_xfer *msg;
+ struct scpi_chan *scpi_chan;
+
+ if (scpi_info->commands[idx] < 0)
+ return -EOPNOTSUPP;
+
+ cmd = scpi_info->commands[idx];
+
+ if (scpi_info->is_legacy)
+ chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+ else
+ chan = atomic_inc_return(&scpi_info->next_chan) %
+ scpi_info->num_chans;
+ scpi_chan = scpi_info->channels + chan;
+
+ msg = get_scpi_xfer(scpi_chan);
+ if (!msg)
+ return -ENOMEM;
+
+ if (scpi_info->is_legacy) {
+ msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+ msg->slot = msg->cmd;
+ } else {
+ msg->slot = BIT(SCPI_SLOT);
+ msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ }
+ msg->tx_buf = tx_buf;
+ msg->tx_len = tx_len;
+ msg->rx_buf = rx_buf;
+ msg->rx_len = rx_len;
+ reinit_completion(&msg->done);
+
+ ret = mbox_send_message(scpi_chan->chan, msg);
+ if (ret < 0 || !rx_buf)
+ goto out;
+
+ if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
+ ret = -ETIMEDOUT;
+ else
+ /* first status word */
+ ret = msg->status;
+out:
+ if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
+ scpi_process_cmd(scpi_chan, msg->cmd);
+
+ put_scpi_xfer(msg, scpi_chan);
+	/* SCPI error codes are > 0, translate them to Linux error codes */
+ return ret > 0 ? scpi_to_linux_errno(ret) : ret;
+}
+
+static u32 scpi_get_version(void)
+{
+ return scpi_info->protocol_version;
+}
+
+static int
+scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
+{
+ int ret;
+ struct clk_get_info clk;
+ __le16 le_clk_id = cpu_to_le16(clk_id);
+
+ ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
+ sizeof(le_clk_id), &clk, sizeof(clk));
+ if (!ret) {
+ *min = le32_to_cpu(clk.min_rate);
+ *max = le32_to_cpu(clk.max_rate);
+ }
+ return ret;
+}
+
+static unsigned long scpi_clk_get_val(u16 clk_id)
+{
+ int ret;
+ __le32 rate;
+ __le16 le_clk_id = cpu_to_le16(clk_id);
+
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
+ sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
+
+ return le32_to_cpu(rate);
+}
+
+static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct legacy_clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int scpi_dvfs_get_idx(u8 domain)
+{
+ int ret;
+ u8 dvfs_idx;
+
+ ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
+ &dvfs_idx, sizeof(dvfs_idx));
+
+ return ret ? ret : dvfs_idx;
+}
+
+static int scpi_dvfs_set_idx(u8 domain, u8 index)
+{
+ int stat;
+ struct dvfs_set dvfs = {domain, index};
+
+ return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+ &stat, sizeof(stat));
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+ const struct scpi_opp *t1 = opp1, *t2 = opp2;
+
+ return t1->freq - t2->freq;
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
+{
+ struct scpi_dvfs_info *info;
+ struct scpi_opp *opp;
+ struct dvfs_info buf;
+ int ret, i;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return ERR_PTR(-EINVAL);
+
+ if (scpi_info->dvfs[domain]) /* data already populated */
+ return scpi_info->dvfs[domain];
+
+ ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+ &buf, sizeof(buf));
+ if (ret)
+ return ERR_PTR(ret);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->count = buf.opp_count;
+ info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
+
+ info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
+ if (!info->opps) {
+ kfree(info);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
+ opp->freq = le32_to_cpu(buf.opps[i].freq);
+ opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
+ }
+
+ sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
+
+ scpi_info->dvfs[domain] = info;
+ return info;
+}
+
+static int scpi_dev_domain_id(struct device *dev)
+{
+ struct of_phandle_args clkspec;
+
+ if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+ 0, &clkspec))
+ return -EINVAL;
+
+ return clkspec.args[0];
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_info(struct device *dev)
+{
+ int domain = scpi_dev_domain_id(dev);
+
+ if (domain < 0)
+ return ERR_PTR(domain);
+
+ return scpi_dvfs_get_info(domain);
+}
+
+static int scpi_dvfs_get_transition_latency(struct device *dev)
+{
+ struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ return info->latency;
+}
+
+static int scpi_dvfs_add_opps_to_device(struct device *dev)
+{
+ int idx, ret;
+ struct scpi_opp *opp;
+ struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!info->opps)
+ return -EIO;
+
+ for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
+ ret = dev_pm_opp_add(dev, opp->freq, opp->m_volt * 1000);
+ if (ret) {
+ dev_warn(dev, "failed to add opp %uHz %umV\n",
+ opp->freq, opp->m_volt);
+ while (idx-- > 0)
+ dev_pm_opp_remove(dev, (--opp)->freq);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int scpi_sensor_get_capability(u16 *sensors)
+{
+ __le16 cap;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap,
+ sizeof(cap));
+ if (!ret)
+ *sensors = le16_to_cpu(cap);
+
+ return ret;
+}
+
+static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
+{
+ __le16 id = cpu_to_le16(sensor_id);
+ struct _scpi_sensor_info _info;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
+ &_info, sizeof(_info));
+ if (!ret) {
+ memcpy(info, &_info, sizeof(*info));
+ info->sensor_id = le16_to_cpu(_info.sensor_id);
+ }
+
+ return ret;
+}
+
+static int scpi_sensor_get_value(u16 sensor, u64 *val)
+{
+ __le16 id = cpu_to_le16(sensor);
+ __le64 value;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
+ &value, sizeof(value));
+ if (ret)
+ return ret;
+
+ if (scpi_info->is_legacy)
+ /* only 32-bits supported, upper 32 bits can be junk */
+ *val = le32_to_cpup((__le32 *)&value);
+ else
+ *val = le64_to_cpu(value);
+
+ return 0;
+}
+
+static int scpi_device_get_power_state(u16 dev_id)
+{
+ int ret;
+ u8 pstate;
+ __le16 id = cpu_to_le16(dev_id);
+
+ ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
+ sizeof(id), &pstate, sizeof(pstate));
+ return ret ? ret : pstate;
+}
+
+static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
+{
+ int stat;
+ struct dev_pstate_set dev_set = {
+ .dev_id = cpu_to_le16(dev_id),
+ .pstate = pstate,
+ };
+
+ return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ sizeof(dev_set), &stat, sizeof(stat));
+}
+
+static struct scpi_ops scpi_ops = {
+ .get_version = scpi_get_version,
+ .clk_get_range = scpi_clk_get_range,
+ .clk_get_val = scpi_clk_get_val,
+ .clk_set_val = scpi_clk_set_val,
+ .dvfs_get_idx = scpi_dvfs_get_idx,
+ .dvfs_set_idx = scpi_dvfs_set_idx,
+ .dvfs_get_info = scpi_dvfs_get_info,
+ .device_domain_id = scpi_dev_domain_id,
+ .get_transition_latency = scpi_dvfs_get_transition_latency,
+ .add_opps_to_device = scpi_dvfs_add_opps_to_device,
+ .sensor_get_capability = scpi_sensor_get_capability,
+ .sensor_get_info = scpi_sensor_get_info,
+ .sensor_get_value = scpi_sensor_get_value,
+ .device_get_power_state = scpi_device_get_power_state,
+ .device_set_power_state = scpi_device_set_power_state,
+};
+
+struct scpi_ops *get_scpi_ops(void)
+{
+ return scpi_info ? scpi_info->scpi_ops : NULL;
+}
+EXPORT_SYMBOL_GPL(get_scpi_ops);
+
+static int scpi_init_versions(struct scpi_drvinfo *info)
+{
+ int ret;
+ struct scp_capabilities caps;
+
+ ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
+ &caps, sizeof(caps));
+ if (!ret) {
+ info->protocol_version = le32_to_cpu(caps.protocol_version);
+ info->firmware_version = le32_to_cpu(caps.platform_version);
+ }
+ /* Ignore error if not implemented */
+ if (info->is_legacy && ret == -EOPNOTSUPP)
+ return 0;
+
+ return ret;
+}
+
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu.%lu\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu.%lu.%lu\n",
+ FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
+static void scpi_free_channels(void *data)
+{
+ struct scpi_drvinfo *info = data;
+ int i;
+
+ for (i = 0; i < info->num_chans; i++)
+ mbox_free_channel(info->channels[i].chan);
+}
+
+static int scpi_remove(struct platform_device *pdev)
+{
+ int i;
+ struct scpi_drvinfo *info = platform_get_drvdata(pdev);
+
+ scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
+
+ for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
+ kfree(info->dvfs[i]->opps);
+ kfree(info->dvfs[i]);
+ }
+
+ return 0;
+}
+
+#define MAX_SCPI_XFERS 10
+static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
+{
+ int i;
+ struct scpi_xfer *xfers;
+
+ xfers = devm_kcalloc(dev, MAX_SCPI_XFERS, sizeof(*xfers), GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
+
+ ch->xfers = xfers;
+ for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) {
+ init_completion(&xfers->done);
+ list_add_tail(&xfers->node, &ch->xfers_list);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id legacy_scpi_of_match[] = {
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
+static const struct of_device_id shmem_of_match[] __maybe_unused = {
+ { .compatible = "amlogic,meson-gxbb-scp-shmem", },
+ { .compatible = "amlogic,meson-axg-scp-shmem", },
+ { .compatible = "arm,juno-scp-shmem", },
+ { .compatible = "arm,scp-shmem", },
+ { }
+};
+
+static int scpi_probe(struct platform_device *pdev)
+{
+ int count, idx, ret;
+ struct resource res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
+
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
+ return -ENOMEM;
+
+ if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+ scpi_drvinfo->is_legacy = true;
+
+ count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ if (count < 0) {
+ dev_err(dev, "no mboxes property in '%pOF'\n", np);
+ return -ENODEV;
+ }
+
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
+ return -ENOMEM;
+
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
+ if (ret)
+ return ret;
+
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
+ resource_size_t size;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
+ struct mbox_client *cl = &pchan->cl;
+ struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+
+ if (!of_match_node(shmem_of_match, shmem))
+ return -ENXIO;
+
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(dev, "failed to get SCPI payload mem resource\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ pchan->rx_payload = devm_ioremap(dev, res.start, size);
+ if (!pchan->rx_payload) {
+ dev_err(dev, "failed to ioremap SCPI payload\n");
+ return -EADDRNOTAVAIL;
+ }
+ pchan->tx_payload = pchan->rx_payload + (size >> 1);
+
+ cl->dev = dev;
+ cl->rx_callback = scpi_handle_remote_msg;
+ cl->tx_prepare = scpi_tx_prepare;
+ cl->tx_block = true;
+ cl->tx_tout = 20;
+ cl->knows_txdone = false; /* controller can't ack */
+
+ INIT_LIST_HEAD(&pchan->rx_pending);
+ INIT_LIST_HEAD(&pchan->xfers_list);
+ spin_lock_init(&pchan->rx_lock);
+ mutex_init(&pchan->xfers_lock);
+
+ ret = scpi_alloc_xfer_list(dev, pchan);
+ if (!ret) {
+ pchan->chan = mbox_request_channel(cl, idx);
+ if (!IS_ERR(pchan->chan))
+ continue;
+ ret = PTR_ERR(pchan->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get channel%d err %d\n",
+ idx, ret);
+ }
+ return ret;
+ }
+
+ scpi_drvinfo->commands = scpi_std_commands;
+
+ platform_set_drvdata(pdev, scpi_drvinfo);
+
+ if (scpi_drvinfo->is_legacy) {
+ /* Replace with legacy variants */
+ scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+ scpi_drvinfo->commands = scpi_legacy_commands;
+
+ /* Fill priority bitmap */
+ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+ set_bit(legacy_hpriority_cmds[idx],
+ scpi_drvinfo->cmd_priority);
+ }
+
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
+ if (ret) {
+ dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
+ return ret;
+ }
+
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
+ dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
+ else
+ dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK,
+ scpi_drvinfo->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK,
+ scpi_drvinfo->protocol_version),
+ FIELD_GET(FW_REV_MAJOR_MASK,
+ scpi_drvinfo->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK,
+ scpi_drvinfo->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK,
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
+}
+
+static const struct of_device_id scpi_of_match[] = {
+ {.compatible = "arm,scpi"},
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, scpi_of_match);
+
+static struct platform_driver scpi_driver = {
+ .driver = {
+ .name = "scpi_protocol",
+ .of_match_table = scpi_of_match,
+ .dev_groups = versions_groups,
+ },
+ .probe = scpi_probe,
+ .remove = scpi_remove,
+};
+module_platform_driver(scpi_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
+MODULE_LICENSE("GPL v2");
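A short, hypothetical example of how a consumer driver is expected to use the exported get_scpi_ops() accessor and the sensor ops defined above (not part of the patch):

/* Read SCPI sensor 0; defer if the SCPI driver has not probed yet */
static int example_read_sensor0(u64 *value)
{
	struct scpi_ops *ops = get_scpi_ops();

	if (!ops)
		return -EPROBE_DEFER;

	return ops->sensor_get_value(0, value);
}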
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 0000000000..285fe7ad49
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1116 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <acpi/ghes.h>
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+static int sdei_hp_state;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/*
+ * Take this mutex for any API call or modification; always take it
+ * before sdei_list_lock.
+ */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event) \
+ do { \
+ arg.event = event; \
+ arg.first_error = 0; \
+ atomic_set(&arg.errors, 0); \
+ } while (0)
+
+static inline int sdei_do_local_call(smp_call_func_t fn,
+ struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ fn(&arg);
+
+ return arg.first_error;
+}
+
+static inline int sdei_do_cross_call(smp_call_func_t fn,
+ struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+NOKPROBE_SYMBOL(invoke_sdei_fn);
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err)
+ goto fail;
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err)
+ goto fail;
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+
+ return event;
+
+fail:
+ kfree(event);
+ return ERR_PTR(err);
+}
+
+static void sdei_event_destroy_llocked(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+ lockdep_assert_held(&sdei_list_lock);
+
+ list_del(&event->list);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ spin_lock(&sdei_list_lock);
+ sdei_event_destroy_llocked(event);
+ spin_unlock(&sdei_list_lock);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ WARN_ON_ONCE(preemptible());
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ WARN_ON_ONCE(preemptible());
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+
+ if (!err) {
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+ }
+ cpus_read_unlock();
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_unregister(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_unregister, event);
+
+ if (err)
+ goto unlock;
+
+ sdei_event_destroy(event);
+unlock:
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+/*
+ * unregister events, but don't destroy them as they are re-registered by
+ * sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = sdei_api_event_unregister(event->event_num);
+ if (err)
+ break;
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num, err);
+ goto unlock;
+ }
+
+ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ } else {
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err)
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num, err);
+ goto cpu_unlock;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+cpu_unlock:
+ cpus_read_unlock();
+unlock:
+ mutex_unlock(&sdei_events_lock);
+ return err;
+}
+
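For reference, a hypothetical client of the register/enable pair above could look like the sketch below; the callback signature is the one expected for an sdei_event_callback in <linux/arm_sdei.h>, and handlers run in an NMI-like context, so they must not sleep.

static int example_sdei_handler(u32 event_num, struct pt_regs *regs, void *arg)
{
	/* NMI-like context: keep the work minimal and non-blocking */
	return 0;
}

static int example_bind_event(u32 event_num)
{
	int err;

	err = sdei_event_register(event_num, example_sdei_handler, NULL);
	if (err)
		return err;

	err = sdei_event_enable(event_num);
	if (err)
		sdei_event_unregister(event_num);

	return err;
}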
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point, event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ if (err) {
+ pr_err("Failed to re-register event %u\n",
+ event->event_num);
+ sdei_event_destroy_llocked(event);
+ break;
+ }
+ }
+
+ if (event->reenable) {
+ err = sdei_api_event_enable(event->event_num);
+ if (err) {
+ pr_err("Failed to re-enable event %u\n",
+ event->event_num);
+ break;
+ }
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ int err;
+
+ /* un-register private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = sdei_do_local_call(_local_event_unregister, event);
+ if (err) {
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ int err;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_do_local_call(_local_event_register, event);
+ if (err) {
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+
+ if (event->reenable) {
+ err = sdei_do_local_call(_local_event_enable, event);
+ if (err) {
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this cpu */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ WARN_ON_ONCE(preemptible());
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+	cpuhp_remove_state(sdei_hp_state);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err < 0) {
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+ return err;
+ }
+
+ sdei_hp_state = err;
+ return 0;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface, after this there is no point
+ * doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(sdei_hp_state);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+NOKPROBE_SYMBOL(sdei_smccc_smc);
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
+
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb)
+{
+ int err;
+ u64 result;
+ u32 event_num;
+ sdei_event_callback *cb;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ event_num = ghes->generic->notify.vector;
+ if (event_num == 0) {
+ /*
+ * Event 0 is reserved by the specification for
+ * SDEI_EVENT_SIGNAL.
+ */
+ return -EINVAL;
+ }
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err)
+ return err;
+
+ if (result == SDEI_EVENT_PRIORITY_CRITICAL)
+ cb = critical_cb;
+ else
+ cb = normal_cb;
+
+ err = sdei_event_register(event_num, cb, ghes);
+ if (!err)
+ err = sdei_event_enable(event_num);
+
+ return err;
+}
+
+int sdei_unregister_ghes(struct ghes *ghes)
+{
+ int i;
+ int err;
+ u32 event_num = ghes->generic->notify.vector;
+
+ might_sleep();
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ /*
+ * The event may be running on another CPU. Disable it
+ * to stop new events, then try to unregister a few times.
+ */
+ err = sdei_event_disable(event_num);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = sdei_event_unregister(event_num);
+ if (err != -EINPROGRESS)
+ break;
+
+ schedule();
+ }
+
+ return err;
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return SMCCC_CONDUIT_NONE;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return SMCCC_CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return SMCCC_CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (!acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return SMCCC_CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return SMCCC_CONDUIT_SMC;
+ }
+ }
+
+ return SMCCC_CONDUIT_NONE;
+}
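+
+/*
+ * For reference, a minimal (illustrative) devicetree node that would make
+ * sdei_get_conduit() select the SMC conduit looks roughly like the fragment
+ * below; the node name and placement are assumptions, only the "compatible"
+ * and "method" values come from this driver:
+ *
+ *	sdei {
+ *		compatible = "arm,sdei-1.0";
+ *		method = "smc";
+ *	};
+ */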
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Conflicting SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err < 0) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ sdei_hp_state = err;
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ acpi_put_table(sdei_table_header);
+
+ return true;
+}
+
+void __init sdei_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ ret = platform_driver_register(&sdei_driver);
+ if (ret || !sdei_present_acpi())
+ return;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ platform_driver_unregister(&sdei_driver);
+ pr_info("Failed to register ACPI:SDEI platform device %d\n",
+ ret);
+ }
+}
+
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ u32 event_num = arg->event_num;
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
+
+void sdei_handler_abort(void)
+{
+ /*
+ * If the crash happened in an SDEI event handler then we need to
+ * finish the handler with the firmware so that we can have working
+ * interrupts in the crash kernel.
+ */
+ if (__this_cpu_read(sdei_active_critical_event)) {
+ pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_critical_event, NULL);
+ }
+ if (__this_cpu_read(sdei_active_normal_event)) {
+ pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_normal_event, NULL);
+ }
+}
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
new file mode 100644
index 0000000000..8e3d355a63
--- /dev/null
+++ b/drivers/firmware/broadcom/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config BCM47XX_NVRAM
+ bool "Broadcom NVRAM driver"
+ depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
+ help
+ Broadcom home routers contain a flash partition called "nvram" that
+ holds all the important hardware configuration as well as some minor
+ user setup. The NVRAM partition contains text-like data representing
+ name=value pairs.
+ This driver provides an easy way to get the value of a requested
+ parameter: it simply reads the NVRAM content and parses it. It doesn't
+ control any hardware itself.
+
+config BCM47XX_SPROM
+ bool "Broadcom SPROM driver"
+ depends on BCM47XX_NVRAM
+ select GENERIC_NET_UTILS
+ help
+ Broadcom devices store configuration data in an SPROM. Accessing it is
+ specific to the bus host type, e.g. PCI(e) devices have it mapped in
+ a PCI BAR.
+ On SoC devices the SPROM content is stored on the flash used by the
+ bootloader firmware CFE. This driver provides a method for the ssb and
+ bcma drivers to read the SPROM on SoCs.
+
+config TEE_BNXT_FW
+ tristate "Broadcom BNXT firmware manager"
+ depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+ default ARCH_BCM_IPROC
+ help
+ This module helps to manage firmware on Broadcom BNXT devices. The
+ module registers on the TEE bus and invokes calls to manage firmware
+ on the BNXT device.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
new file mode 100644
index 0000000000..17c5061c47
--- /dev/null
+++ b/drivers/firmware/broadcom/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
+obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW) += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
new file mode 100644
index 0000000000..0ea5206be4
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM947xx nvram variable access
+ *
+ * Copyright (C) 2005 Broadcom Corporation
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bcm47xx_nvram.h>
+
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_SPACE 0x10000
+#define NVRAM_MAX_GPIO_ENTRIES 32
+#define NVRAM_MAX_GPIO_VALUE_LEN 30
+
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+struct nvram_header {
+ u32 magic;
+ u32 len;
+ u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ u32 config_ncdl; /* ncdl values for memc */
+};
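+
+/*
+ * The header above is followed in flash by a list of NUL-terminated
+ * "name=value" strings, which is what bcm47xx_nvram_getenv() walks. For
+ * example (illustrative values only):
+ *
+ *	boardtype=0x048e\0boardnum=42\0et0macaddr=00:11:22:33:44:55\0
+ */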
+
+static char nvram_buf[NVRAM_SPACE];
+static size_t nvram_len;
+static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000};
+
+/**
+ * bcm47xx_nvram_is_valid - check for a valid NVRAM at the specified memory
+ * @nvram: pointer to the start of a candidate NVRAM area
+ */
+static bool bcm47xx_nvram_is_valid(void __iomem *nvram)
+{
+ return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC;
+}
+
+/**
+ * bcm47xx_nvram_copy - copy NVRAM to internal buffer
+ * @nvram_start: start of the NVRAM image in the mapped flash
+ * @res_size: remaining size of the mapped resource after @nvram_start
+ */
+static void bcm47xx_nvram_copy(void __iomem *nvram_start, size_t res_size)
+{
+ struct nvram_header __iomem *header = nvram_start;
+ size_t copy_size;
+
+ copy_size = header->len;
+ if (copy_size > res_size) {
+ pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
+ copy_size = res_size;
+ }
+ if (copy_size >= NVRAM_SPACE) {
+ pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ copy_size, NVRAM_SPACE - 1);
+ copy_size = NVRAM_SPACE - 1;
+ }
+
+ __ioread32_copy(nvram_buf, nvram_start, DIV_ROUND_UP(copy_size, 4));
+ nvram_buf[NVRAM_SPACE - 1] = '\0';
+ nvram_len = copy_size;
+}
+
+/**
+ * bcm47xx_nvram_find_and_copy - find NVRAM on a flash mapping & copy it
+ * @flash_start: start of the mapped flash
+ * @res_size: size of the mapped flash resource
+ */
+static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size)
+{
+ size_t flash_size;
+ size_t offset;
+ int i;
+
+ if (nvram_len) {
+ pr_warn("nvram already initialized\n");
+ return -EEXIST;
+ }
+
+ /* TODO: when nvram is on nand flash check for bad blocks first. */
+
+ /* Try every possible flash size and check for NVRAM at its end */
+ for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) {
+ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+ offset = flash_size - nvram_sizes[i];
+ if (bcm47xx_nvram_is_valid(flash_start + offset))
+ goto found;
+ }
+ }
+
+ /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
+
+ offset = 4096;
+ if (bcm47xx_nvram_is_valid(flash_start + offset))
+ goto found;
+
+ offset = 1024;
+ if (bcm47xx_nvram_is_valid(flash_start + offset))
+ goto found;
+
+ pr_err("no nvram found\n");
+ return -ENXIO;
+
+found:
+ bcm47xx_nvram_copy(flash_start + offset, res_size - offset);
+
+ return 0;
+}
+
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size)
+{
+ if (nvram_len) {
+ pr_warn("nvram already initialized\n");
+ return -EEXIST;
+ }
+
+ if (!bcm47xx_nvram_is_valid(nvram_start)) {
+ pr_err("No valid NVRAM found\n");
+ return -ENOENT;
+ }
+
+ bcm47xx_nvram_copy(nvram_start, res_size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcm47xx_nvram_init_from_iomem);
+
+/*
+ * On bcm47xx we need access to the NVRAM very early, so we can't use the mtd
+ * subsystem to access flash. We can't even use a platform device / driver to
+ * store the memory offset.
+ * To handle this we provide the following symbol. It's supposed to be called
+ * as soon as we get info about the flash device, before any NVRAM entry is
+ * needed. See the usage sketch after the function below.
+ */
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ void __iomem *iobase;
+ int err;
+
+ iobase = ioremap(base, lim);
+ if (!iobase)
+ return -ENOMEM;
+
+ err = bcm47xx_nvram_find_and_copy(iobase, lim);
+
+ iounmap(iobase);
+
+ return err;
+}
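+
+/*
+ * A minimal usage sketch (illustrative only; the base address and limit below
+ * are hypothetical and would normally come from the platform's flash window):
+ *
+ *	if (bcm47xx_nvram_init_from_mem(0x1fc00000, 0x400000))
+ *		pr_warn("no usable NVRAM found in flash window\n");
+ */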
+
+static int nvram_init(void)
+{
+#ifdef CONFIG_MTD
+ struct mtd_info *mtd;
+ struct nvram_header header;
+ size_t bytes_read;
+ int err;
+
+ mtd = get_mtd_device_nm("nvram");
+ if (IS_ERR(mtd))
+ return -ENODEV;
+
+ err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (!err && header.magic == NVRAM_MAGIC &&
+ header.len > sizeof(header)) {
+ nvram_len = header.len;
+ if (nvram_len >= NVRAM_SPACE) {
+ pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ nvram_len, NVRAM_SPACE);
+ nvram_len = NVRAM_SPACE - 1;
+ }
+
+ err = mtd_read(mtd, 0, nvram_len, &nvram_len,
+ (u8 *)nvram_buf);
+ return err;
+ }
+#endif
+
+ return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
+{
+ char *var, *value, *end, *eq;
+ int err;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!nvram_len) {
+ err = nvram_init();
+ if (err)
+ return err;
+ }
+
+ /* Look for name=value and return value */
+ var = &nvram_buf[sizeof(struct nvram_header)];
+ end = nvram_buf + sizeof(nvram_buf);
+ while (var < end && *var) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ value = eq + 1;
+ if (eq - var == strlen(name) &&
+ strncmp(var, name, eq - var) == 0)
+ return snprintf(val, val_len, "%s", value);
+ var = value + strlen(value) + 1;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ int i, err;
+ char nvram_var[] = "gpioXX";
+ char buf[NVRAM_MAX_GPIO_VALUE_LEN];
+
+ /* TODO: Optimize this to avoid calling getenv so many times */
+ for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) {
+ err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+ if (err <= 0)
+ continue;
+ err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+ if (err <= 0)
+ continue;
+ if (!strcmp(name, buf))
+ return i;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
+
+char *bcm47xx_nvram_get_contents(size_t *nvram_size)
+{
+ int err;
+ char *nvram;
+
+ if (!nvram_len) {
+ err = nvram_init();
+ if (err)
+ return NULL;
+ }
+
+ *nvram_size = nvram_len - sizeof(struct nvram_header);
+ nvram = vmalloc(*nvram_size);
+ if (!nvram)
+ return NULL;
+ memcpy(nvram, &nvram_buf[sizeof(struct nvram_header)], *nvram_size);
+
+ return nvram;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
+
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
new file mode 100644
index 0000000000..14fbcd1165
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcm47xx_sprom.h>
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ssb/ssb.h>
+
+static void create_key(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len)
+{
+ if (prefix && postfix)
+ snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+ else if (prefix)
+ snprintf(buf, len, "%s%s", prefix, name);
+ else if (postfix)
+ snprintf(buf, len, "%s%s", name, postfix);
+ else
+ snprintf(buf, len, "%s", name);
+}
+
+static int get_nvram_var(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len, bool fallback)
+{
+ char key[40];
+ int err;
+
+ create_key(prefix, postfix, name, key, sizeof(key));
+
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ if (fallback && err == -ENOENT && prefix) {
+ create_key(NULL, postfix, name, key, sizeof(key));
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ }
+ return err;
+}
+
+#define NVRAM_READ_VAL(type) \
+static void nvram_read_ ## type(const char *prefix, \
+ const char *postfix, const char *name, \
+ type *val, type allset, bool fallback) \
+{ \
+ char buf[100]; \
+ int err; \
+ type var; \
+ \
+ err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
+ fallback); \
+ if (err < 0) \
+ return; \
+ err = kstrto ## type(strim(buf), 0, &var); \
+ if (err) { \
+ pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
+ prefix, name, postfix, buf, err); \
+ return; \
+ } \
+ if (allset && var == allset) \
+ return; \
+ *val = var; \
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
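+
+/*
+ * The macro above instantiates nvram_read_u8/s8/u16/u32(). As an example,
+ * a call such as:
+ *
+ *	nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+ *
+ * looks up "<prefix>sromrev", parses it with kstrtou8() and only stores the
+ * result if parsing succeeded and, when a non-zero "allset" marker is given,
+ * the value differs from that marker.
+ */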
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+ u16 *val_lo, u16 *val_hi, bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+ *val_lo = (val & 0x0000FFFFU);
+ *val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+ u8 *leddc_on_time, u8 *leddc_off_time,
+ bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+
+ if (val == 0xffff || val == 0xffffffff)
+ return;
+
+ *leddc_on_time = val & 0xff;
+ *leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+ u8 val[6], bool fallback)
+{
+ char buf[100];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+
+ strreplace(buf, '-', ':');
+ if (!mac_pton(buf, val))
+ pr_warn("Can not parse mac address: %s\n", buf);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+ char val[2], bool fallback)
+{
+ char buf[10];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ if (buf[0] == '0')
+ return;
+ if (strlen(buf) > 2) {
+ pr_warn("alpha2 is too long %s\n", buf);
+ return;
+ }
+ memcpy(val, buf, 2);
+}
+
+/* This macro is used by one function only; it relies on the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+ if (_revmask & BIT(sprom->revision)) \
+ nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+ _allset, _fallback)
+/*
+ * Special version of the filling function that can be safely called for any
+ * SPROM revision. For every NVRAM to SPROM mapping it contains a bitmask of
+ * the revisions for which the mapping is valid.
+ * It obviously requires some hexadecimal/bitmask knowledge, but allows
+ * writing cleaner code (easier revision handling).
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep the numbering sane.
+ */
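+/*
+ * As an illustration, an entry such as:
+ *
+ *	ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+ *
+ * expands to a guarded read that only runs for SPROM revisions 1-3 and 8-10
+ * (the set bits of the mask):
+ *
+ *	if (0x0000070e & BIT(sprom->revision))
+ *		nvram_read_u16(pre, NULL, "pa0b0", &sprom->pa0b0, 0, fb);
+ */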
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ const char *pre = prefix;
+ bool fb = fallback;
+
+ /* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
+ ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
+
+ ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+ ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
+ ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
+ ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
+ ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+ ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+ ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+ ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+ ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+ ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+ ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+ ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+ ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+ ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+ ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+ ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+ ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+ ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+ ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+ ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+ ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+ ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+ ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+ ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+ ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u8(prefix, postfix, "maxp2ga",
+ &pwr_info->maxpwr_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt2ga",
+ &pwr_info->itssi_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt5ga",
+ &pwr_info->itssi_5g, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw0a",
+ &pwr_info->pa_2g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw1a",
+ &pwr_info->pa_2g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw2a",
+ &pwr_info->pa_2g[2], 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5ga",
+ &pwr_info->maxpwr_5g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gha",
+ &pwr_info->maxpwr_5gh, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gla",
+ &pwr_info->maxpwr_5gl, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw0a",
+ &pwr_info->pa_5g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw1a",
+ &pwr_info->pa_5g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw2a",
+ &pwr_info->pa_5g[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw0a",
+ &pwr_info->pa_5gl[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw1a",
+ &pwr_info->pa_5gl[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw2a",
+ &pwr_info->pa_5gl[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw0a",
+ &pwr_info->pa_5gh[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw1a",
+ &pwr_info->pa_5gh[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw2a",
+ &pwr_info->pa_5gh[2], 0, fallback);
+ }
+}
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u16(prefix, postfix, "pa2gw3a",
+ &pwr_info->pa_2g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw3a",
+ &pwr_info->pa_5g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw3a",
+ &pwr_info->pa_5gl[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw3a",
+ &pwr_info->pa_5gh[3], 0, fallback);
+ }
+}
+
+static bool bcm47xx_is_valid_mac(u8 *mac)
+{
+ return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
+}
+
+static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
+{
+ u8 *oui = mac + ETH_ALEN/2 - 1;
+ u8 *p = mac + ETH_ALEN - 1;
+
+ do {
+ (*p) += num;
+ if (*p > num)
+ break;
+ p--;
+ num = 1;
+ } while (p != oui);
+
+ if (p == oui) {
+ pr_err("unable to fetch mac address\n");
+ return -ENOENT;
+ }
+ return 0;
+}
+
+static int mac_addr_used = 2;
+
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ bool fb = fallback;
+
+ nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
+ nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
+ nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
+ nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
+ nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
+
+ nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+ nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
+
+ /* The address prefix 00:90:4C is used by Broadcom in its initial
+ * configuration. When a mac address with the prefix 00:90:4C is used,
+ * all devices from the same series share the same mac address. To
+ * prevent mac address collisions we replace it with a mac address
+ * derived from et0macaddr instead.
+ */
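+ /*
+ * Worked example with illustrative values: if et0macaddr is
+ * 00:11:22:33:44:55 and mac_addr_used is still at its initial value
+ * of 2, the derived il0mac becomes 00:11:22:33:44:57.
+ */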
+ if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
+ u8 mac[6];
+
+ nvram_read_macaddr(NULL, "et0macaddr", mac, false);
+ if (bcm47xx_is_valid_mac(mac)) {
+ int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
+
+ if (!err) {
+ ether_addr_copy(sprom->il0mac, mac);
+ mac_addr_used++;
+ }
+ }
+ }
+}
+
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi, fallback);
+ nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+ &sprom->boardflags2_hi, fallback);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+ bcm47xx_fill_board_data(sprom, prefix, fallback);
+
+ nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+
+ /* Entries requiring custom functions */
+ nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
+ if (sprom->revision >= 3)
+ nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+ &sprom->leddc_off_time, fallback);
+
+ switch (sprom->revision) {
+ case 4:
+ case 5:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
+ break;
+ case 8:
+ case 9:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ break;
+ }
+
+ bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
+}
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+
+ switch (bus->bustype) {
+ case SSB_BUSTYPE_SSB:
+ bcm47xx_fill_sprom(out, NULL, false);
+ return 0;
+ case SSB_BUSTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix, false);
+ return 0;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
+ * decided to introduce prefixes like 0: 1: 2: etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+ size_t prefix_len = strlen(prefix);
+ size_t short_len = prefix_len - 1;
+ char nvram_var[10];
+ char buf[20];
+ int i;
+
+ /* Passed prefix has to end with a slash */
+ if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
+ return;
+
+ for (i = 0; i < 3; i++) {
+ if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+ continue;
+ if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+ continue;
+ if (!strcmp(buf, prefix) ||
+ (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+ snprintf(prefix, prefix_size, "%d:", i);
+ return;
+ }
+ }
+}
+
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+ struct bcma_boardinfo *binfo = &bus->boardinfo;
+ struct bcma_device *core;
+ char buf[10];
+ char *prefix;
+ bool fallback = false;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ /* On BCM47XX all PCI buses share the same domain */
+ if (IS_ENABLED(CONFIG_BCM47XX))
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ else
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ pci_domain_nr(bus->host_pci->bus) + 1,
+ bus->host_pci->bus->number);
+ bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
+ prefix = buf;
+ break;
+ case BCMA_HOSTTYPE_SOC:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ core = bcma_find_core(bus, BCMA_CORE_80211);
+ if (core) {
+ snprintf(buf, sizeof(buf), "sb/%u/",
+ core->core_index);
+ prefix = buf;
+ fallback = true;
+ } else {
+ prefix = NULL;
+ }
+ break;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+
+ nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
+ if (!binfo->vendor)
+ binfo->vendor = SSB_BOARDVENDOR_BCM;
+ nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
+
+ bcm47xx_fill_sprom(out, prefix, fallback);
+
+ return 0;
+}
+#endif
+
+static unsigned int bcm47xx_sprom_registered;
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handler very early, so we
+ * can't use anything like a platform device / driver for this.
+ */
+int bcm47xx_sprom_register_fallbacks(void)
+{
+ if (bcm47xx_sprom_registered)
+ return 0;
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+ if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+ pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+ if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+ pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+
+ bcm47xx_sprom_registered = 1;
+
+ return 0;
+}
+
+fs_initcall(bcm47xx_sprom_register_fallbacks);
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644
index 0000000000..40e3183a3d
--- /dev/null
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB 4
+
+enum ta_cmd {
+ /*
+ * TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
+ *
+ * param[0] unused
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found on memory
+ */
+ TA_CMD_BNXT_FASTBOOT = 0,
+
+ /*
+ * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+ *
+ * param[0] (inout memref) - Coredump buffer memory reference
+ * param[1] (in value) - value.a: offset, data to be copied from
+ * value.b: size of data to be copied
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+ */
+ TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev: OP-TEE based bnxt device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: TA session identifier.
+ * @fw_shm_pool: Shared memory pool used for data exchange with the TA.
+ */
+struct tee_bnxt_fw_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *fw_shm_pool;
+};
+
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = pvt_data.session_id;
+ arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+ /* Fill invoke cmd params */
+ switch (cmd) {
+ case TA_CMD_BNXT_COPY_COREDUMP:
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data.fw_shm_pool;
+ param[0].u.memref.size = MAX_SHM_MEM_SZ;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ break;
+ case TA_CMD_BNXT_FASTBOOT:
+ default:
+ /* Nothing to do */
+ break;
+ }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *
+ * Uses an OP-TEE call to start a secure boot process.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy coredump from the allocated memory
+ * Uses an OP-TEE call to copy coredump
+ * @buf: destination buffer where core dump is copied into
+ * @offset: offset from the base address of core dump area
+ * @size: size of the dump
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+ void *core_data;
+ u32 rbytes = size;
+ u32 nbytes = 0;
+ int ret = 0;
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
+ while (rbytes) {
+ nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+ /* Fill additional invoke cmd params */
+ param[1].u.value.a = offset;
+ param[1].u.value.b = nbytes;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+ if (IS_ERR(core_data)) {
+ dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+ return PTR_ERR(core_data);
+ }
+
+ memcpy(buf, core_data, nbytes);
+
+ rbytes -= nbytes;
+ buf += nbytes;
+ offset += nbytes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
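+
+/*
+ * A minimal caller sketch (illustrative only; the buffer size and offset are
+ * assumptions, real callers such as the bnxt network driver use their own
+ * coredump bookkeeping):
+ *
+ *	void *dump = vmalloc(SZ_1M);
+ *
+ *	if (dump && !tee_bnxt_copy_coredump(dump, 0, SZ_1M))
+ *		... process the first 1 MiB of the core dump ...
+ */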
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+ struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+ int ret, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *fw_shm_pool;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with Bnxt load Trusted App */
+ export_uuid(sess_arg.uuid, &bnxt_device->id.uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ pvt_data.dev = dev;
+
+ fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
+ if (IS_ERR(fw_shm_pool)) {
+ dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
+ err = PTR_ERR(fw_shm_pool);
+ goto out_sess;
+ }
+
+ pvt_data.fw_shm_pool = fw_shm_pool;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+
+ return 0;
+}
+
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+ {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+ 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+ .id_table = tee_bnxt_fw_id_table,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .bus = &tee_bus_type,
+ .probe = tee_bnxt_fw_probe,
+ .remove = tee_bnxt_fw_remove,
+ .shutdown = tee_bnxt_fw_shutdown,
+ },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+ return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+ driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
new file mode 100644
index 0000000000..3ccbe14e4b
--- /dev/null
+++ b/drivers/firmware/cirrus/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config FW_CS_DSP
+ tristate
+ default n
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
new file mode 100644
index 0000000000..b91318ca0f
--- /dev/null
+++ b/drivers/firmware/cirrus/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
new file mode 100644
index 0000000000..79d4254d1f
--- /dev/null
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -0,0 +1,3344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cs_dsp.c -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.c
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+#define cs_dsp_err(_dsp, fmt, ...) \
+ dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_warn(_dsp, fmt, ...) \
+ dev_warn(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_info(_dsp, fmt, ...) \
+ dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_dbg(_dsp, fmt, ...) \
+ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+
+#define ADSP1_CONTROL_1 0x00
+#define ADSP1_CONTROL_2 0x02
+#define ADSP1_CONTROL_3 0x03
+#define ADSP1_CONTROL_4 0x04
+#define ADSP1_CONTROL_5 0x06
+#define ADSP1_CONTROL_6 0x07
+#define ADSP1_CONTROL_7 0x08
+#define ADSP1_CONTROL_8 0x09
+#define ADSP1_CONTROL_9 0x0A
+#define ADSP1_CONTROL_10 0x0B
+#define ADSP1_CONTROL_11 0x0C
+#define ADSP1_CONTROL_12 0x0D
+#define ADSP1_CONTROL_13 0x0F
+#define ADSP1_CONTROL_14 0x10
+#define ADSP1_CONTROL_15 0x11
+#define ADSP1_CONTROL_16 0x12
+#define ADSP1_CONTROL_17 0x13
+#define ADSP1_CONTROL_18 0x14
+#define ADSP1_CONTROL_19 0x16
+#define ADSP1_CONTROL_20 0x17
+#define ADSP1_CONTROL_21 0x18
+#define ADSP1_CONTROL_22 0x1A
+#define ADSP1_CONTROL_23 0x1B
+#define ADSP1_CONTROL_24 0x1C
+#define ADSP1_CONTROL_25 0x1E
+#define ADSP1_CONTROL_26 0x20
+#define ADSP1_CONTROL_27 0x21
+#define ADSP1_CONTROL_28 0x22
+#define ADSP1_CONTROL_29 0x23
+#define ADSP1_CONTROL_30 0x24
+#define ADSP1_CONTROL_31 0x26
+
+/*
+ * ADSP1 Control 19
+ */
+#define ADSP1_WDMA_BUFFER_LENGTH_MASK 0x00FF /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_SHIFT 0 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_WIDTH 8 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+
+/*
+ * ADSP1 Control 30
+ */
+#define ADSP1_DBG_CLK_ENA 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_MASK 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_SHIFT 3 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_WIDTH 1 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP1_START 0x0001 /* DSP1_START */
+#define ADSP1_START_MASK 0x0001 /* DSP1_START */
+#define ADSP1_START_SHIFT 0 /* DSP1_START */
+#define ADSP1_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP1 Control 31
+ */
+#define ADSP1_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2_CONTROL 0x0
+#define ADSP2_CLOCKING 0x1
+#define ADSP2V2_CLOCKING 0x2
+#define ADSP2_STATUS1 0x4
+#define ADSP2_WDMA_CONFIG_1 0x30
+#define ADSP2_WDMA_CONFIG_2 0x31
+#define ADSP2V2_WDMA_CONFIG_2 0x32
+#define ADSP2_RDMA_CONFIG_1 0x34
+
+#define ADSP2_SCRATCH0 0x40
+#define ADSP2_SCRATCH1 0x41
+#define ADSP2_SCRATCH2 0x42
+#define ADSP2_SCRATCH3 0x43
+
+#define ADSP2V2_SCRATCH0_1 0x40
+#define ADSP2V2_SCRATCH2_3 0x42
+
+/*
+ * ADSP2 Control
+ */
+#define ADSP2_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ADSP2_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP2_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP2_START 0x0001 /* DSP1_START */
+#define ADSP2_START_MASK 0x0001 /* DSP1_START */
+#define ADSP2_START_SHIFT 0 /* DSP1_START */
+#define ADSP2_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP2 clocking
+ */
+#define ADSP2_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+/*
+ * ADSP2V2 clocking
+ */
+#define ADSP2V2_CLK_SEL_MASK 0x70000 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_SHIFT 16 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2V2_RATE_MASK 0x7800 /* DSP_RATE */
+#define ADSP2V2_RATE_SHIFT 11 /* DSP_RATE */
+#define ADSP2V2_RATE_WIDTH 4 /* DSP_RATE */
+
+/*
+ * ADSP2 Status 1
+ */
+#define ADSP2_RAM_RDY 0x0001
+#define ADSP2_RAM_RDY_MASK 0x0001
+#define ADSP2_RAM_RDY_SHIFT 0
+#define ADSP2_RAM_RDY_WIDTH 1
+
+/*
+ * ADSP2 Lock support
+ */
+#define ADSP2_LOCK_CODE_0 0x5555
+#define ADSP2_LOCK_CODE_1 0xAAAA
+
+#define ADSP2_WATCHDOG 0x0A
+#define ADSP2_BUS_ERR_ADDR 0x52
+#define ADSP2_REGION_LOCK_STATUS 0x64
+#define ADSP2_LOCK_REGION_1_LOCK_REGION_0 0x66
+#define ADSP2_LOCK_REGION_3_LOCK_REGION_2 0x68
+#define ADSP2_LOCK_REGION_5_LOCK_REGION_4 0x6A
+#define ADSP2_LOCK_REGION_7_LOCK_REGION_6 0x6C
+#define ADSP2_LOCK_REGION_9_LOCK_REGION_8 0x6E
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR 0x7C
+
+#define ADSP2_REGION_LOCK_ERR_MASK 0x8000
+#define ADSP2_ADDR_ERR_MASK 0x4000
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+#define ADSP2_CTRL_ERR_PAUSE_ENA 0x0002
+#define ADSP2_CTRL_ERR_EINT 0x0001
+
+#define ADSP2_BUS_ERR_ADDR_MASK 0x00FFFFFF
+#define ADSP2_XMEM_ERR_ADDR_MASK 0x0000FFFF
+#define ADSP2_PMEM_ERR_ADDR_MASK 0x7FFF0000
+#define ADSP2_PMEM_ERR_ADDR_SHIFT 16
+#define ADSP2_WDT_ENA_MASK 0xFFFFFFFD
+
+#define ADSP2_LOCK_REGION_SHIFT 16
+
+/*
+ * Event control messages
+ */
+#define CS_DSP_FW_EVENT_SHUTDOWN 0x000001
+
+/*
+ * HALO system info
+ */
+#define HALO_AHBM_WINDOW_DEBUG_0 0x02040
+#define HALO_AHBM_WINDOW_DEBUG_1 0x02044
+
+/*
+ * HALO core
+ */
+#define HALO_SCRATCH1 0x005c0
+#define HALO_SCRATCH2 0x005c8
+#define HALO_SCRATCH3 0x005d0
+#define HALO_SCRATCH4 0x005d8
+#define HALO_CCM_CORE_CONTROL 0x41000
+#define HALO_CORE_SOFT_RESET 0x00010
+#define HALO_WDT_CONTROL 0x47000
+
+/*
+ * HALO MPU banks
+ */
+#define HALO_MPU_XMEM_ACCESS_0 0x43000
+#define HALO_MPU_YMEM_ACCESS_0 0x43004
+#define HALO_MPU_WINDOW_ACCESS_0 0x43008
+#define HALO_MPU_XREG_ACCESS_0 0x4300C
+#define HALO_MPU_YREG_ACCESS_0 0x43014
+#define HALO_MPU_XMEM_ACCESS_1 0x43018
+#define HALO_MPU_YMEM_ACCESS_1 0x4301C
+#define HALO_MPU_WINDOW_ACCESS_1 0x43020
+#define HALO_MPU_XREG_ACCESS_1 0x43024
+#define HALO_MPU_YREG_ACCESS_1 0x4302C
+#define HALO_MPU_XMEM_ACCESS_2 0x43030
+#define HALO_MPU_YMEM_ACCESS_2 0x43034
+#define HALO_MPU_WINDOW_ACCESS_2 0x43038
+#define HALO_MPU_XREG_ACCESS_2 0x4303C
+#define HALO_MPU_YREG_ACCESS_2 0x43044
+#define HALO_MPU_XMEM_ACCESS_3 0x43048
+#define HALO_MPU_YMEM_ACCESS_3 0x4304C
+#define HALO_MPU_WINDOW_ACCESS_3 0x43050
+#define HALO_MPU_XREG_ACCESS_3 0x43054
+#define HALO_MPU_YREG_ACCESS_3 0x4305C
+#define HALO_MPU_XM_VIO_ADDR 0x43100
+#define HALO_MPU_XM_VIO_STATUS 0x43104
+#define HALO_MPU_YM_VIO_ADDR 0x43108
+#define HALO_MPU_YM_VIO_STATUS 0x4310C
+#define HALO_MPU_PM_VIO_ADDR 0x43110
+#define HALO_MPU_PM_VIO_STATUS 0x43114
+#define HALO_MPU_LOCK_CONFIG 0x43140
+
+/*
+ * HALO_AHBM_WINDOW_DEBUG_1
+ */
+#define HALO_AHBM_CORE_ERR_ADDR_MASK 0x0fffff00
+#define HALO_AHBM_CORE_ERR_ADDR_SHIFT 8
+#define HALO_AHBM_FLAGS_ERR_MASK 0x000000ff
+
+/*
+ * HALO_CCM_CORE_CONTROL
+ */
+#define HALO_CORE_RESET 0x00000200
+#define HALO_CORE_EN 0x00000001
+
+/*
+ * HALO_CORE_SOFT_RESET
+ */
+#define HALO_CORE_SOFT_RESET_MASK 0x00000001
+
+/*
+ * HALO_WDT_CONTROL
+ */
+#define HALO_WDT_EN_MASK 0x00000001
+
+/*
+ * HALO_MPU_?M_VIO_STATUS
+ */
+#define HALO_MPU_VIO_STS_MASK 0x007e0000
+#define HALO_MPU_VIO_STS_SHIFT 17
+#define HALO_MPU_VIO_ERR_WR_MASK 0x00008000
+#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
+#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
+
+struct cs_dsp_ops {
+ bool (*validate_version)(struct cs_dsp *dsp, unsigned int version);
+ unsigned int (*parse_sizes)(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware);
+ int (*setup_algs)(struct cs_dsp *dsp);
+ unsigned int (*region_to_reg)(struct cs_dsp_region const *mem,
+ unsigned int offset);
+
+ void (*show_fw_status)(struct cs_dsp *dsp);
+ void (*stop_watchdog)(struct cs_dsp *dsp);
+
+ int (*enable_memory)(struct cs_dsp *dsp);
+ void (*disable_memory)(struct cs_dsp *dsp);
+ int (*lock_memory)(struct cs_dsp *dsp, unsigned int lock_regions);
+
+ int (*enable_core)(struct cs_dsp *dsp);
+ void (*disable_core)(struct cs_dsp *dsp);
+
+ int (*start_core)(struct cs_dsp *dsp);
+ void (*stop_core)(struct cs_dsp *dsp);
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops;
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
+static const struct cs_dsp_ops cs_dsp_halo_ops;
+static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
+
+struct cs_dsp_buf {
+ struct list_head list;
+ void *buf;
+};
+
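+/*
+ * cs_dsp_buf_alloc() copies a chunk of firmware file data into a vmalloc'd
+ * buffer and (optionally) queues it on a list. The copies are needed because
+ * the data is later handed to regmap_raw_write_async(), so each buffer must
+ * stay alive until the matching regmap_async_complete() call; the whole list
+ * is then released with cs_dsp_buf_free().
+ */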
+static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
+ struct list_head *list)
+{
+ struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+
+ if (buf == NULL)
+ return NULL;
+
+ buf->buf = vmalloc(len);
+ if (!buf->buf) {
+ kfree(buf);
+ return NULL;
+ }
+ memcpy(buf->buf, src, len);
+
+ if (list)
+ list_add_tail(&buf->list, list);
+
+ return buf;
+}
+
+static void cs_dsp_buf_free(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct cs_dsp_buf *buf = list_first_entry(list,
+ struct cs_dsp_buf,
+ list);
+ list_del(&buf->list);
+ vfree(buf->buf);
+ kfree(buf);
+ }
+}
+
+/**
+ * cs_dsp_mem_region_name() - Return a name string for a memory type
+ * @type: the memory type to match
+ *
+ * Return: A const string identifying the memory region, or NULL if the
+ * type is not recognized.
+ */
+const char *cs_dsp_mem_region_name(unsigned int type)
+{
+ switch (type) {
+ case WMFW_ADSP1_PM:
+ return "PM";
+ case WMFW_HALO_PM_PACKED:
+ return "PM_PACKED";
+ case WMFW_ADSP1_DM:
+ return "DM";
+ case WMFW_ADSP2_XM:
+ return "XM";
+ case WMFW_HALO_XM_PACKED:
+ return "XM_PACKED";
+ case WMFW_ADSP2_YM:
+ return "YM";
+ case WMFW_HALO_YM_PACKED:
+ return "YM_PACKED";
+ case WMFW_ADSP1_ZM:
+ return "ZM";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP);
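+
+/*
+ * Illustrative use (hypothetical caller): the name is typically used in
+ * diagnostic messages, remembering that NULL is returned for unknown types:
+ *
+ *     const char *name = cs_dsp_mem_region_name(WMFW_ADSP2_XM);   // "XM"
+ */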
+
+#ifdef CONFIG_DEBUG_FS
+static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->wmfw_file_name);
+ dsp->wmfw_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->bin_file_name);
+ dsp->bin_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+ kfree(dsp->wmfw_file_name);
+ kfree(dsp->bin_file_name);
+ dsp->wmfw_file_name = NULL;
+ dsp->bin_file_name = NULL;
+}
+
+static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->wmfw_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->wmfw_file_name,
+ strlen(dsp->wmfw_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->bin_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->bin_file_name,
+ strlen(dsp->bin_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static const struct {
+ const char *name;
+ const struct file_operations fops;
+} cs_dsp_debugfs_fops[] = {
+ {
+ .name = "wmfw_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_wmfw_read,
+ },
+ },
+ {
+ .name = "bin_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_bin_read,
+ },
+ },
+};
+
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
+ unsigned int off);
+
+static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored)
+{
+ struct cs_dsp *dsp = s->private;
+ struct cs_dsp_coeff_ctl *ctl;
+ unsigned int reg;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ cs_dsp_coeff_base_reg(ctl, &reg, 0);
+ seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
+ ctl->subname_len, ctl->subname, ctl->len,
+ cs_dsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type,
+ ctl->flags & WMFW_CTL_FLAG_VOLATILE ? 'V' : '-',
+ ctl->flags & WMFW_CTL_FLAG_SYS ? 'S' : '-',
+ ctl->flags & WMFW_CTL_FLAG_READABLE ? 'R' : '-',
+ ctl->flags & WMFW_CTL_FLAG_WRITEABLE ? 'W' : '-',
+ ctl->enabled ? "enabled" : "disabled",
+ ctl->set ? "dirty" : "clean");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cs_dsp_debugfs_read_controls);
+
+/**
+ * cs_dsp_init_debugfs() - Create and populate DSP representation in debugfs
+ * @dsp: pointer to DSP structure
+ * @debugfs_root: pointer to debugfs directory in which to create this DSP
+ * representation
+ */
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+ struct dentry *root = NULL;
+ int i;
+
+ root = debugfs_create_dir(dsp->name, debugfs_root);
+
+ debugfs_create_bool("booted", 0444, root, &dsp->booted);
+ debugfs_create_bool("running", 0444, root, &dsp->running);
+ debugfs_create_x32("fw_id", 0444, root, &dsp->fw_id);
+ debugfs_create_x32("fw_version", 0444, root, &dsp->fw_id_version);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_debugfs_fops); ++i)
+ debugfs_create_file(cs_dsp_debugfs_fops[i].name, 0444, root,
+ dsp, &cs_dsp_debugfs_fops[i].fops);
+
+ debugfs_create_file("controls", 0444, root, dsp,
+ &cs_dsp_debugfs_read_controls_fops);
+
+ dsp->debugfs_root = root;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+
+/**
+ * cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+ cs_dsp_debugfs_clear(dsp);
+ debugfs_remove_recursive(dsp->debugfs_root);
+ dsp->debugfs_root = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+#else
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+
+static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+}
+#endif
+
+static const struct cs_dsp_region *cs_dsp_find_region(struct cs_dsp *dsp,
+ int type)
+{
+ int i;
+
+ for (i = 0; i < dsp->num_mems; i++)
+ if (dsp->mem[i].type == type)
+ return &dsp->mem[i];
+
+ return NULL;
+}
+
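+/*
+ * The region_to_reg helpers below translate an offset in DSP words within a
+ * memory region into a register address on the control bus. The per-type
+ * multipliers reflect how many register addresses one DSP word occupies for
+ * that memory type; for example, the HALO packed X/Y cases use three address
+ * units per word, aligned down to a 32-bit register boundary, which is
+ * consistent with packed 24-bit DSP words.
+ */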
+static unsigned int cs_dsp_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP1_PM:
+ return mem->base + (offset * 3);
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ return mem->base + (offset * 2);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
+
+static unsigned int cs_dsp_halo_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return mem->base + (offset * 4);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return (mem->base + (offset * 3)) & ~0x3;
+ case WMFW_HALO_PM_PACKED:
+ return mem->base + (offset * 5);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
+
+static void cs_dsp_read_fw_status(struct cs_dsp *dsp,
+ int noffs, unsigned int *offs)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < noffs; ++i) {
+ ret = regmap_read(dsp->regmap, dsp->base + offs[i], &offs[i]);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+ return;
+ }
+ }
+}
+
+static void cs_dsp_adsp2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ ADSP2_SCRATCH0, ADSP2_SCRATCH1, ADSP2_SCRATCH2, ADSP2_SCRATCH3,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static void cs_dsp_adsp2v2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = { ADSP2V2_SCRATCH0_1, ADSP2V2_SCRATCH2_3 };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0] & 0xFFFF, offs[0] >> 16,
+ offs[1] & 0xFFFF, offs[1] >> 16);
+}
+
+static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ HALO_SCRATCH1, HALO_SCRATCH2, HALO_SCRATCH3, HALO_SCRATCH4,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
+ unsigned int off)
+{
+ const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
+ struct cs_dsp *dsp = ctl->dsp;
+ const struct cs_dsp_region *mem;
+
+ mem = cs_dsp_find_region(dsp, alg_region->type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n",
+ alg_region->type);
+ return -EINVAL;
+ }
+
+ *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_acked_control() - Sends event_id to the acked control
+ * @ctl: pointer to acked coefficient control
+ * @event_id: the value to write to the given acked control
+ *
+ * Once the value has been written to the control, the function blocks
+ * until the running firmware acknowledges the write or the timeout is
+ * exceeded.
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ __be32 val = cpu_to_be32(event_id);
+ unsigned int reg;
+ int i, ret;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ if (!dsp->running)
+ return -EPERM;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, 0);
+ if (ret)
+ return ret;
+
+ cs_dsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
+ event_id, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type), ctl->offset);
+
+ ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ /*
+ * Poll for the ack. We initially poll at ~1ms intervals for firmwares
+ * that respond quickly, then fall back to ~10ms polls. A firmware is
+ * unlikely to ack instantly, so the first 1ms delay is done before
+ * reading the control to avoid a pointless bus transaction.
+ */
+ for (i = 0; i < CS_DSP_ACKED_CTL_TIMEOUT_MS;) {
+ switch (i) {
+ case 0 ... CS_DSP_ACKED_CTL_N_QUICKPOLLS - 1:
+ usleep_range(1000, 2000);
+ i++;
+ break;
+ default:
+ usleep_range(10000, 20000);
+ i += 10;
+ break;
+ }
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ if (val == 0) {
+ cs_dsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
+ return 0;
+ }
+ }
+
+ cs_dsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
+ reg, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP);
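+
+/*
+ * Illustrative sketch of a caller (not part of this driver): signal a
+ * firmware event on a previously located HOSTEVENT control, holding
+ * pwr_lock as required:
+ *
+ *     mutex_lock(&dsp->pwr_lock);
+ *     ret = cs_dsp_coeff_write_acked_control(ctl, CS_DSP_FW_EVENT_SHUTDOWN);
+ *     mutex_unlock(&dsp->pwr_lock);
+ *
+ * cs_dsp_signal_event_controls() below does this for every enabled
+ * HOSTEVENT control in the firmware.
+ */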
+
+static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, const void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
+ if (ret)
+ return ret;
+
+ scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_write(dsp->regmap, reg, scratch,
+ len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %zu bytes to %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Wrote %zu bytes to %x\n", len, reg);
+
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
+ * @ctl: pointer to coefficient control
+ * @off: word offset at which data should be written
+ * @buf: the buffer to write to the given control
+ * @len: the length of the buffer in bytes
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: < 0 on error, 1 when the control value changed and 0 when it has not.
+ */
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, const void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+ ret = -EPERM;
+ } else if (buf != ctl->cache) {
+ if (memcmp(ctl->cache + off * sizeof(u32), buf, len))
+ memcpy(ctl->cache + off * sizeof(u32), buf, len);
+ else
+ return 0;
+ }
+
+ ctl->set = 1;
+ if (ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len);
+
+ if (ret < 0)
+ return ret;
+
+ return 1;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
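+
+/*
+ * Illustrative sketch (hypothetical caller): write one big-endian word to
+ * word offset 0 of a control, under pwr_lock:
+ *
+ *     __be32 val = cpu_to_be32(1);
+ *     int ret;
+ *
+ *     mutex_lock(&dsp->pwr_lock);
+ *     ret = cs_dsp_coeff_write_ctrl(ctl, 0, &val, sizeof(val));
+ *     mutex_unlock(&dsp->pwr_lock);
+ *
+ * ret == 1 means the cached value changed (and was written through if the
+ * firmware is running), 0 means it was already up to date, < 0 is an error.
+ */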
+
+static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
+ if (ret)
+ return ret;
+
+ scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_read(dsp->regmap, reg, scratch, len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %zu bytes from %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Read %zu bytes from %x\n", len, reg);
+
+ memcpy(buf, scratch, len);
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
+ * @ctl: pointer to coefficient control
+ * @off: word offset at which data should be read
+ * @buf: the buffer into which the control data should be read
+ * @len: the length of the buffer in bytes
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+ if (ctl->enabled && ctl->dsp->running)
+ return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len);
+ else
+ return -EPERM;
+ } else {
+ if (!ctl->flags && ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
+
+ if (buf != ctl->cache)
+ memcpy(buf, ctl->cache + off * sizeof(u32), len);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
+
+static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled || ctl->set)
+ continue;
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ continue;
+
+ /*
+ * For readable controls, populate the cache from DSP memory. For
+ * non-readable controls the cache was zero-filled when created, so
+ * nothing needs to be done here.
+ */
+ if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled)
+ continue;
+ if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache,
+ ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
+ unsigned int event)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
+ continue;
+
+ if (!ctl->enabled)
+ continue;
+
+ ret = cs_dsp_coeff_write_acked_control(ctl, event);
+ if (ret)
+ cs_dsp_warn(dsp,
+ "Failed to send 0x%x event to alg 0x%x (%d)\n",
+ event, ctl->alg_region.alg, ret);
+ }
+}
+
+static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
+{
+ kfree(ctl->cache);
+ kfree(ctl->subname);
+ kfree(ctl);
+}
+
+static int cs_dsp_create_control(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region,
+ unsigned int offset, unsigned int len,
+ const char *subname, unsigned int subname_len,
+ unsigned int flags, unsigned int type)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ ctl->alg_region.alg == alg_region->alg &&
+ ctl->alg_region.type == alg_region->type) {
+ if ((!subname && !ctl->subname) ||
+ (subname && (ctl->subname_len == subname_len) &&
+ !strncmp(ctl->subname, subname, ctl->subname_len))) {
+ if (!ctl->enabled)
+ ctl->enabled = 1;
+ return 0;
+ }
+ }
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->fw_name = dsp->fw_name;
+ ctl->alg_region = *alg_region;
+ if (subname && dsp->fw_ver >= 2) {
+ ctl->subname_len = subname_len;
+ ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl;
+ }
+ }
+ ctl->enabled = 1;
+ ctl->set = 0;
+ ctl->dsp = dsp;
+
+ ctl->flags = flags;
+ ctl->type = type;
+ ctl->offset = offset;
+ ctl->len = len;
+ ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
+ if (!ctl->cache) {
+ ret = -ENOMEM;
+ goto err_ctl_subname;
+ }
+
+ list_add(&ctl->list, &dsp->ctl_list);
+
+ if (dsp->client_ops->control_add) {
+ ret = dsp->client_ops->control_add(ctl);
+ if (ret)
+ goto err_list_del;
+ }
+
+ return 0;
+
+err_list_del:
+ list_del(&ctl->list);
+ kfree(ctl->cache);
+err_ctl_subname:
+ kfree(ctl->subname);
+err_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+struct cs_dsp_coeff_parsed_alg {
+ int id;
+ const u8 *name;
+ int name_len;
+ int ncoeff;
+};
+
+struct cs_dsp_coeff_parsed_coeff {
+ int offset;
+ int mem_type;
+ const u8 *name;
+ int name_len;
+ unsigned int ctl_type;
+ int flags;
+ int len;
+};
+
+static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+{
+ int length;
+
+ switch (bytes) {
+ case 1:
+ length = **pos;
+ break;
+ case 2:
+ length = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ default:
+ return 0;
+ }
+
+ if (str)
+ *str = *pos + bytes;
+
+ *pos += ((length + bytes) + 3) & ~0x03;
+
+ return length;
+}
+
+static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+{
+ int val = 0;
+
+ switch (bytes) {
+ case 2:
+ val = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ case 4:
+ val = le32_to_cpu(*((__le32 *)*pos));
+ break;
+ default:
+ break;
+ }
+
+ *pos += bytes;
+
+ return val;
+}
+
+static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_alg *blk)
+{
+ const struct wmfw_adsp_alg_data *raw;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_alg_data *)*data;
+ *data = raw->data;
+
+ blk->id = le32_to_cpu(raw->id);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ncoeff = le32_to_cpu(raw->ncoeff);
+ break;
+ default:
+ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
+}
+
+static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_coeff *blk)
+{
+ const struct wmfw_adsp_coeff_data *raw;
+ const u8 *tmp;
+ int length;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_coeff_data *)*data;
+ *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
+
+ blk->offset = le16_to_cpu(raw->hdr.offset);
+ blk->mem_type = le16_to_cpu(raw->hdr.type);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ blk->flags = le16_to_cpu(raw->flags);
+ blk->len = le32_to_cpu(raw->len);
+ break;
+ default:
+ tmp = *data;
+ blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+ blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+ length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
+ blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
+ blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
+ blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+
+ *data = *data + sizeof(raw->hdr) + length;
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "\tCoefficient type: %#x\n", blk->mem_type);
+ cs_dsp_dbg(dsp, "\tCoefficient offset: %#x\n", blk->offset);
+ cs_dsp_dbg(dsp, "\tCoefficient name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
+}
+
+static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+ const struct cs_dsp_coeff_parsed_coeff *coeff_blk,
+ unsigned int f_required,
+ unsigned int f_illegal)
+{
+ if ((coeff_blk->flags & f_illegal) ||
+ ((coeff_blk->flags & f_required) != f_required)) {
+ cs_dsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
+ coeff_blk->flags, coeff_blk->ctl_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ const struct wmfw_region *region)
+{
+ struct cs_dsp_alg_region alg_region = {};
+ struct cs_dsp_coeff_parsed_alg alg_blk;
+ struct cs_dsp_coeff_parsed_coeff coeff_blk;
+ const u8 *data = region->data;
+ int i, ret;
+
+ cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ for (i = 0; i < alg_blk.ncoeff; i++) {
+ cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
+
+ switch (coeff_blk.ctl_type) {
+ case WMFW_CTL_TYPE_BYTES:
+ break;
+ case WMFW_CTL_TYPE_ACKED:
+ if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
+ continue; /* ignore */
+
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOSTEVENT:
+ case WMFW_CTL_TYPE_FWEVENT:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOST_BUFFER:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ default:
+ cs_dsp_err(dsp, "Unknown control type: %d\n",
+ coeff_blk.ctl_type);
+ return -EINVAL;
+ }
+
+ alg_region.type = coeff_blk.mem_type;
+ alg_region.alg = alg_blk.id;
+
+ ret = cs_dsp_create_control(dsp, &alg_region,
+ coeff_blk.offset,
+ coeff_blk.len,
+ coeff_blk.name,
+ coeff_blk.name_len,
+ coeff_blk.flags,
+ coeff_blk.ctl_type);
+ if (ret < 0)
+ cs_dsp_err(dsp, "Failed to create control: %.*s, %d\n",
+ coeff_blk.name_len, coeff_blk.name, ret);
+ }
+
+ return 0;
+}
+
+static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+
+ adsp1_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+ le32_to_cpu(adsp1_sizes->zm));
+
+ return pos + sizeof(*adsp1_sizes);
+}
+
+static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp2_sizes *adsp2_sizes;
+
+ adsp2_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+ le32_to_cpu(adsp2_sizes->pm), le32_to_cpu(adsp2_sizes->zm));
+
+ return pos + sizeof(*adsp2_sizes);
+}
+
+static bool cs_dsp_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 0:
+ cs_dsp_warn(dsp, "Deprecated file format %d\n", version);
+ return true;
+ case 1:
+ case 2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs_dsp_halo_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ unsigned int pos = 0;
+ const struct wmfw_header *header;
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+ const struct wmfw_footer *footer;
+ const struct wmfw_region *region;
+ const struct cs_dsp_region *mem;
+ const char *region_name;
+ char *text = NULL;
+ struct cs_dsp_buf *buf;
+ unsigned int reg;
+ int regions = 0;
+ int ret, offset, type;
+
+ if (!firmware)
+ return 0;
+
+ ret = -EINVAL;
+
+ pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+ if (pos >= firmware->size) {
+ cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ header = (void *)&firmware->data[0];
+
+ if (memcmp(&header->magic[0], "WMFW", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid magic\n", file);
+ goto out_fw;
+ }
+
+ if (!dsp->ops->validate_version(dsp, header->ver)) {
+ cs_dsp_err(dsp, "%s: unknown file format %d\n",
+ file, header->ver);
+ goto out_fw;
+ }
+
+ cs_dsp_info(dsp, "Firmware version: %d\n", header->ver);
+ dsp->fw_ver = header->ver;
+
+ if (header->core != dsp->type) {
+ cs_dsp_err(dsp, "%s: invalid core %d != %d\n",
+ file, header->core, dsp->type);
+ goto out_fw;
+ }
+
+ pos = sizeof(*header);
+ pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
+
+ footer = (void *)&firmware->data[pos];
+ pos += sizeof(*footer);
+
+ if (le32_to_cpu(header->len) != pos) {
+ cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+ file, le32_to_cpu(header->len));
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ le64_to_cpu(footer->timestamp));
+
+ while (pos < firmware->size &&
+ sizeof(*region) < firmware->size - pos) {
+ region = (void *)&(firmware->data[pos]);
+ region_name = "Unknown";
+ reg = 0;
+ text = NULL;
+ offset = le32_to_cpu(region->offset) & 0xffffff;
+ type = be32_to_cpu(region->type) & 0xff;
+
+ switch (type) {
+ case WMFW_NAME_TEXT:
+ region_name = "Firmware name";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ALGORITHM_DATA:
+ region_name = "Algorithm";
+ ret = cs_dsp_parse_coeff(dsp, region);
+ if (ret != 0)
+ goto out_fw;
+ break;
+ case WMFW_INFO_TEXT:
+ region_name = "Information";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ABSOLUTE:
+ region_name = "Absolute";
+ reg = offset;
+ break;
+ case WMFW_ADSP1_PM:
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No region of type: %x\n", type);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ region_name = cs_dsp_mem_region_name(type);
+ reg = dsp->ops->region_to_reg(mem, offset);
+ break;
+ default:
+ cs_dsp_warn(dsp,
+ "%s.%d: Unknown region type %x at %d(%x)\n",
+ file, regions, type, pos, pos);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at %d in %s\n", file,
+ regions, le32_to_cpu(region->len), offset,
+ region_name);
+
+ if (le32_to_cpu(region->len) >
+ firmware->size - pos - sizeof(*region)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ if (text) {
+ memcpy(text, region->data, le32_to_cpu(region->len));
+ cs_dsp_info(dsp, "%s: %s\n", file, text);
+ kfree(text);
+ text = NULL;
+ }
+
+ if (reg) {
+ buf = cs_dsp_buf_alloc(region->data,
+ le32_to_cpu(region->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+ file, regions,
+ le32_to_cpu(region->len), offset,
+ region_name, ret);
+ goto out_fw;
+ }
+ }
+
+ pos += le32_to_cpu(region->len) + sizeof(*region);
+ regions++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+ goto out_fw;
+ }
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, regions, pos - firmware->size);
+
+ cs_dsp_debugfs_save_wmfwname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
+
+ return ret;
+}
+
+/**
+ * cs_dsp_get_ctl() - Finds a matching coefficient control
+ * @dsp: pointer to DSP structure
+ * @name: pointer to string to match with a control's subname
+ * @type: the algorithm type to match
+ * @alg: the algorithm id to match
+ *
+ * Finds the cs_dsp_coeff_ctl whose subname matches the given name.
+ *
+ * Return: pointer to the control on success, NULL if not found
+ */
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg)
+{
+ struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ list_for_each_entry(pos, &dsp->ctl_list, list) {
+ if (!pos->subname)
+ continue;
+ if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ pos->fw_name == dsp->fw_name &&
+ pos->alg_region.alg == alg &&
+ pos->alg_region.type == type) {
+ rslt = pos;
+ break;
+ }
+ }
+
+ return rslt;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP);
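+
+/*
+ * Illustrative sketch (hypothetical caller, names are placeholders): look
+ * up a control by subname and read its current value while holding
+ * pwr_lock:
+ *
+ *     struct cs_dsp_coeff_ctl *ctl;
+ *     __be32 val;
+ *     int ret = -ENOENT;
+ *
+ *     mutex_lock(&dsp->pwr_lock);
+ *     ctl = cs_dsp_get_ctl(dsp, "EXAMPLE_PARAM", WMFW_ADSP2_YM, example_alg_id);
+ *     if (ctl)
+ *             ret = cs_dsp_coeff_read_ctrl(ctl, 0, &val, sizeof(val));
+ *     mutex_unlock(&dsp->pwr_lock);
+ */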
+
+static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ alg_region->alg == ctl->alg_region.alg &&
+ alg_region->type == ctl->alg_region.type) {
+ ctl->alg_region.base = alg_region->base;
+ }
+ }
+}
+
+static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
+ const struct cs_dsp_region *mem,
+ unsigned int pos, unsigned int len)
+{
+ void *alg;
+ unsigned int reg;
+ int ret;
+ __be32 val;
+
+ if (n_algs == 0) {
+ cs_dsp_err(dsp, "No algorithms\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (n_algs > 1024) {
+ cs_dsp_err(dsp, "Algorithm count %zx excessive\n", n_algs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Read the terminator first to validate the length */
+ reg = dsp->ops->region_to_reg(mem, pos + len);
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list end: %d\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ if (be32_to_cpu(val) != 0xbedead)
+ cs_dsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
+ reg, be32_to_cpu(val));
+
+ /* Convert length from DSP words to bytes */
+ len *= sizeof(u32);
+
+ alg = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!alg)
+ return ERR_PTR(-ENOMEM);
+
+ reg = dsp->ops->region_to_reg(mem, pos);
+
+ ret = regmap_raw_read(dsp->regmap, reg, alg, len);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list: %d\n", ret);
+ kfree(alg);
+ return ERR_PTR(ret);
+ }
+
+ return alg;
+}
+
+/**
+ * cs_dsp_find_alg_region() - Finds a matching algorithm region
+ * @dsp: pointer to DSP structure
+ * @type: the algorithm type to match
+ * @id: the algorithm id to match
+ *
+ * Return: Pointer to matching algorithm region, or NULL if not found.
+ */
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ list_for_each_entry(alg_region, &dsp->alg_regions, list) {
+ if (id == alg_region->alg && type == alg_region->type)
+ return alg_region;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP);
+
+static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
+ int type, __be32 id,
+ __be32 ver, __be32 base)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
+ if (!alg_region)
+ return ERR_PTR(-ENOMEM);
+
+ alg_region->type = type;
+ alg_region->alg = be32_to_cpu(id);
+ alg_region->ver = be32_to_cpu(ver);
+ alg_region->base = be32_to_cpu(base);
+
+ list_add_tail(&alg_region->list, &dsp->alg_regions);
+
+ if (dsp->fw_ver > 0)
+ cs_dsp_ctl_fixup_base(dsp, alg_region);
+
+ return alg_region;
+}
+
+static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ while (!list_empty(&dsp->alg_regions)) {
+ alg_region = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region,
+ list);
+ list_del(&alg_region->list);
+ kfree(alg_region);
+ }
+}
+
+static void cs_dsp_parse_wmfw_id_header(struct cs_dsp *dsp,
+ struct wmfw_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+
+ cs_dsp_info(dsp, "Firmware: %x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
+ struct wmfw_v3_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+ dsp->fw_vendor_id = be32_to_cpu(fw->vendor_id);
+
+ cs_dsp_info(dsp, "Firmware: %x vendor: 0x%x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, dsp->fw_vendor_id,
+ (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
+ int nregions, const int *type, __be32 *base)
+{
+ struct cs_dsp_alg_region *alg_region;
+ int i;
+
+ for (i = 0; i < nregions; i++) {
+ alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp1_id_hdr adsp1_id;
+ struct wmfw_adsp1_alg_hdr *adsp1_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP1_DM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp1_id,
+ sizeof(adsp1_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp1_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.dm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp1_id) / sizeof(u32);
+ len = (sizeof(*adsp1_alg) * n_algs) / sizeof(u32);
+
+ adsp1_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp1_alg))
+ return PTR_ERR(adsp1_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp, "%d: ID %x v%d.%d.%d DM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp1_alg[i].alg.id),
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp1_alg[i].dm),
+ be32_to_cpu(adsp1_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
+ adsp1_alg[i].dm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].dm);
+ len -= be32_to_cpu(adsp1_alg[i].dm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region DM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
+ adsp1_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp1_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp1_alg);
+ return ret;
+}
+
+static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp2_id_hdr adsp2_id;
+ struct wmfw_adsp2_alg_hdr *adsp2_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp2_id,
+ sizeof(adsp2_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp2_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.xm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.ym);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp2_id) / sizeof(u32);
+ len = (sizeof(*adsp2_alg) * n_algs) / sizeof(u32);
+
+ adsp2_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp2_alg))
+ return PTR_ERR(adsp2_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp2_alg[i].alg.id),
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp2_alg[i].xm),
+ be32_to_cpu(adsp2_alg[i].ym),
+ be32_to_cpu(adsp2_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
+ adsp2_alg[i].xm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].xm);
+ len -= be32_to_cpu(adsp2_alg[i].xm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region XM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
+ adsp2_alg[i].ym);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].ym);
+ len -= be32_to_cpu(adsp2_alg[i].ym);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region YM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
+ adsp2_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp2_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp2_alg);
+ return ret;
+}
+
+static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
+ __be32 xm_base, __be32 ym_base)
+{
+ static const int types[] = {
+ WMFW_ADSP2_XM, WMFW_HALO_XM_PACKED,
+ WMFW_ADSP2_YM, WMFW_HALO_YM_PACKED
+ };
+ __be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
+
+ return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases);
+}
+
+static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_halo_id_hdr halo_id;
+ struct wmfw_halo_alg_hdr *halo_alg;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &halo_id,
+ sizeof(halo_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(halo_id.n_algs);
+
+ cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver,
+ halo_id.xm_base, halo_id.ym_base);
+ if (ret)
+ return ret;
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(halo_id) / sizeof(u32);
+ len = (sizeof(*halo_alg) * n_algs) / sizeof(u32);
+
+ halo_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(halo_alg))
+ return PTR_ERR(halo_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_dbg(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+ i, be32_to_cpu(halo_alg[i].alg.id),
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(halo_alg[i].xm_base),
+ be32_to_cpu(halo_alg[i].ym_base));
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ halo_alg[i].alg.ver,
+ halo_alg[i].xm_base,
+ halo_alg[i].ym_base);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(halo_alg);
+ return ret;
+}
+
+static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ struct wmfw_coeff_hdr *hdr;
+ struct wmfw_coeff_item *blk;
+ const struct cs_dsp_region *mem;
+ struct cs_dsp_alg_region *alg_region;
+ const char *region_name;
+ int ret, pos, blocks, type, offset, reg, version;
+ char *text = NULL;
+ struct cs_dsp_buf *buf;
+
+ if (!firmware)
+ return 0;
+
+ ret = -EINVAL;
+
+ if (sizeof(*hdr) >= firmware->size) {
+ cs_dsp_err(dsp, "%s: coefficient file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ hdr = (void *)&firmware->data[0];
+ if (memcmp(hdr->magic, "WMDR", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid coefficient magic\n", file);
+ goto out_fw;
+ }
+
+ switch (be32_to_cpu(hdr->rev) & 0xff) {
+ case 1:
+ case 2:
+ break;
+ default:
+ cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
+ file, be32_to_cpu(hdr->rev) & 0xff);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ cs_dsp_info(dsp, "%s: v%d.%d.%d\n", file,
+ (le32_to_cpu(hdr->ver) >> 16) & 0xff,
+ (le32_to_cpu(hdr->ver) >> 8) & 0xff,
+ le32_to_cpu(hdr->ver) & 0xff);
+
+ pos = le32_to_cpu(hdr->len);
+
+ blocks = 0;
+ while (pos < firmware->size &&
+ sizeof(*blk) < firmware->size - pos) {
+ blk = (void *)(&firmware->data[pos]);
+
+ type = le16_to_cpu(blk->type);
+ offset = le16_to_cpu(blk->offset);
+ version = le32_to_cpu(blk->ver) >> 8;
+
+ cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
+ file, blocks, le32_to_cpu(blk->id),
+ (le32_to_cpu(blk->ver) >> 16) & 0xff,
+ (le32_to_cpu(blk->ver) >> 8) & 0xff,
+ le32_to_cpu(blk->ver) & 0xff);
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at 0x%x in %x\n",
+ file, blocks, le32_to_cpu(blk->len), offset, type);
+
+ reg = 0;
+ region_name = "Unknown";
+ switch (type) {
+ case (WMFW_NAME_TEXT << 8):
+ text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL);
+ break;
+ case (WMFW_INFO_TEXT << 8):
+ case (WMFW_METADATA << 8):
+ break;
+ case (WMFW_ABSOLUTE << 8):
+ /*
+ * Old files may use this for global
+ * coefficients.
+ */
+ if (le32_to_cpu(blk->id) == dsp->fw_id &&
+ offset == 0) {
+ region_name = "global coefficients";
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No ZM\n");
+ break;
+ }
+ reg = dsp->ops->region_to_reg(mem, 0);
+
+ } else {
+ region_name = "register";
+ reg = offset;
+ }
+ break;
+
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ case WMFW_HALO_PM_PACKED:
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes in %x for %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ type, le32_to_cpu(blk->id));
+
+ region_name = cs_dsp_mem_region_name(type);
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n", type);
+ break;
+ }
+
+ alg_region = cs_dsp_find_alg_region(dsp, type,
+ le32_to_cpu(blk->id));
+ if (alg_region) {
+ if (version != alg_region->ver)
+ cs_dsp_warn(dsp,
+ "Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n",
+ (version >> 16) & 0xFF,
+ (version >> 8) & 0xFF,
+ version & 0xFF,
+ (alg_region->ver >> 16) & 0xFF,
+ (alg_region->ver >> 8) & 0xFF,
+ alg_region->ver & 0xFF);
+
+ reg = alg_region->base;
+ reg = dsp->ops->region_to_reg(mem, reg);
+ reg += offset;
+ } else {
+ cs_dsp_err(dsp, "No %s for algorithm %x\n",
+ region_name, le32_to_cpu(blk->id));
+ }
+ break;
+
+ default:
+ cs_dsp_err(dsp, "%s.%d: Unknown region type %x at %d\n",
+ file, blocks, type, pos);
+ break;
+ }
+
+ if (text) {
+ memcpy(text, blk->data, le32_to_cpu(blk->len));
+ cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text);
+ kfree(text);
+ text = NULL;
+ }
+
+ if (reg) {
+ if (le32_to_cpu(blk->len) >
+ firmware->size - pos - sizeof(*blk)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ buf = cs_dsp_buf_alloc(blk->data,
+ le32_to_cpu(blk->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ reg);
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write to %x in %s: %d\n",
+ file, blocks, reg, region_name, ret);
+ }
+ }
+
+ pos += (le32_to_cpu(blk->len) + sizeof(*blk) + 3) & ~0x03;
+ blocks++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0)
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, blocks, pos - firmware->size);
+
+ cs_dsp_debugfs_save_binname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
+ return ret;
+}
+
+static int cs_dsp_create_name(struct cs_dsp *dsp)
+{
+ if (!dsp->name) {
+ dsp->name = devm_kasprintf(dsp->dev, GFP_KERNEL, "DSP%d",
+ dsp->num);
+ if (!dsp->name)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_common_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = cs_dsp_create_name(dsp);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dsp->alg_regions);
+ INIT_LIST_HEAD(&dsp->ctl_list);
+
+ mutex_init(&dsp->pwr_lock);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_adsp1_init() - Initialise a cs_dsp structure representing an ADSP1 device
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_init(struct cs_dsp *dsp)
+{
+ dsp->ops = &cs_dsp_adsp1_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP);
+
+/**
+ * cs_dsp_adsp1_power_up() - Load and start the named firmware
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of the coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, ADSP1_SYS_ENA);
+
+ /*
+ * For simplicity set the DSP clock rate to be the
+ * SYSCLK rate rather than making it configurable.
+ */
+ if (dsp->sysclk_reg) {
+ ret = regmap_read(dsp->regmap, dsp->sysclk_reg, &val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read SYSCLK state: %d\n", ret);
+ goto err_mutex;
+ }
+
+ val = (val & dsp->sysclk_mask) >> dsp->sysclk_shift;
+
+ ret = regmap_update_bits(dsp->regmap,
+ dsp->base + ADSP1_CONTROL_31,
+ ADSP1_CLK_SEL_MASK, val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+ goto err_mutex;
+ }
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_adsp1_setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ dsp->booted = true;
+
+ /* Start the core running */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START,
+ ADSP1_CORE_ENA | ADSP1_START);
+
+ dsp->running = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err_ena:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP);
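+
+/*
+ * Illustrative sketch (hypothetical caller, firmware names are placeholders):
+ * load and start an ADSP1 core, then halt it again. A NULL coefficient
+ * firmware is accepted and simply skipped:
+ *
+ *     ret = request_firmware(&wmfw, wmfw_name, dsp->dev);
+ *     if (!ret)
+ *             ret = cs_dsp_adsp1_power_up(dsp, wmfw, wmfw_name,
+ *                                         NULL, NULL, "example-fw");
+ *     ...
+ *     cs_dsp_adsp1_power_down(dsp);
+ *     release_firmware(wmfw);
+ */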
+
+/**
+ * cs_dsp_adsp1_power_down() - Halts the DSP
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->running = false;
+ dsp->booted = false;
+
+ /* Halt the core */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_19,
+ ADSP1_WDMA_BUFFER_LENGTH_MASK, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP);
+
+static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ int ret, count;
+
+ /* Wait for the RAM to start; this should be near instantaneous */
+ for (count = 0; count < 10; ++count) {
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & ADSP2_RAM_RDY)
+ break;
+
+ usleep_range(250, 500);
+ }
+
+ if (!(val & ADSP2_RAM_RDY)) {
+ cs_dsp_err(dsp, "Failed to start DSP RAM\n");
+ return -EBUSY;
+ }
+
+ cs_dsp_dbg(dsp, "RAM ready after %d polls\n", count);
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ if (ret != 0)
+ return ret;
+
+ return cs_dsp_adsp2v2_enable_core(dsp);
+}
+
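+/*
+ * Lock DSP memory regions against access. Each 16-bit half of a lock
+ * register covers one region, so two regions are handled per register; a
+ * region is locked by writing the two key values (ADSP2_LOCK_CODE_0 then
+ * ADSP2_LOCK_CODE_1) to its half of the register. This describes the
+ * sequence implemented below.
+ */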
+static int cs_dsp_adsp2_lock(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int code0, code1, lock_reg;
+
+ if (!(lock_regions & CS_ADSP2_REGION_ALL))
+ return 0;
+
+ lock_regions &= CS_ADSP2_REGION_ALL;
+ lock_reg = dsp->base + ADSP2_LOCK_REGION_1_LOCK_REGION_0;
+
+ while (lock_regions) {
+ code0 = code1 = 0;
+ if (lock_regions & BIT(0)) {
+ code0 = ADSP2_LOCK_CODE_0;
+ code1 = ADSP2_LOCK_CODE_1;
+ }
+ if (lock_regions & BIT(1)) {
+ code0 |= ADSP2_LOCK_CODE_0 << ADSP2_LOCK_REGION_SHIFT;
+ code1 |= ADSP2_LOCK_CODE_1 << ADSP2_LOCK_REGION_SHIFT;
+ }
+ regmap_write(regmap, lock_reg, code0);
+ regmap_write(regmap, lock_reg, code1);
+ lock_regions >>= 2;
+ lock_reg += 2;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_memory(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, ADSP2_MEM_ENA);
+}
+
+static void cs_dsp_adsp2_disable_memory(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+}
+
+static void cs_dsp_adsp2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
+}
+
+static void cs_dsp_adsp2v2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2V2_WDMA_CONFIG_2, 0);
+}
+
+static int cs_dsp_halo_configure_mpu(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct reg_sequence config[] = {
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0x5555 },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0xAAAA },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0 },
+ };
+
+ return regmap_multi_reg_write(dsp->regmap, config, ARRAY_SIZE(config));
+}
+
+/**
+ * cs_dsp_set_dspclk() - Applies the given frequency to the given cs_dsp
+ * @dsp: pointer to DSP structure
+ * @freq: clock rate to set
+ *
+ * This is only for use on ADSP2 cores.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
+{
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CLOCKING,
+ ADSP2_CLK_SEL_MASK,
+ freq << ADSP2_CLK_SEL_SHIFT);
+ if (ret)
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP);
+
+static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_WATCHDOG,
+ ADSP2_WDT_ENA_MASK, 0);
+}
+
+static void cs_dsp_halo_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_WDT_CONTROL,
+ HALO_WDT_EN_MASK, 0);
+}
+
+/**
+ * cs_dsp_power_up() - Downloads firmware to the DSP
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * This function is used on ADSP2 and Halo DSP cores; it powers up the DSP core
+ * and downloads the firmware, but does not start the firmware running. The
+ * cs_dsp booted flag will be set once this completes, and if the core has a
+ * low-power memory retention mode it will be put into that state after the
+ * firmware is downloaded.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ if (dsp->ops->enable_memory) {
+ ret = dsp->ops->enable_memory(dsp);
+ if (ret != 0)
+ goto err_mutex;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err_mem;
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = dsp->ops->setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ dsp->booted = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+err_ena:
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+err_mem:
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP);
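+
+/*
+ * Illustrative sketch, not part of this driver: the typical client call
+ * sequence implied by the kerneldoc above. The dsp pointer, firmware blobs
+ * and file names are hypothetical placeholders (for example obtained by the
+ * client driver via request_firmware()):
+ *
+ *     ret = cs_dsp_power_up(dsp, wmfw, "example.wmfw",
+ *                           bin, "example.bin", "example-fw");
+ *     if (ret)
+ *             return ret;
+ *
+ *     ret = cs_dsp_run(dsp);       // start the downloaded firmware
+ *     ...
+ *     cs_dsp_stop(dsp);            // firmware remains loaded
+ *     cs_dsp_power_down(dsp);      // full power-down, memory not retained
+ */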
+
+/**
+ * cs_dsp_power_down() - Powers-down the DSP
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_stop() must have been called before this function. The core will be
+ * fully powered down and so the memory will not be retained.
+ */
+void cs_dsp_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_debugfs_clear(dsp);
+
+ dsp->fw_id = 0;
+ dsp->fw_id_version = 0;
+
+ dsp->booted = false;
+
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Shutdown complete\n");
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP);
+
+static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START,
+ ADSP2_CORE_ENA | ADSP2_START);
+}
+
+static void cs_dsp_adsp2_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START, 0);
+}
+
+/**
+ * cs_dsp_run() - Starts the firmware running
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_power_up() must have previously been called successfully.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_run(struct cs_dsp *dsp)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->booted) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ if (dsp->client_ops->pre_run) {
+ ret = dsp->client_ops->pre_run(dsp);
+ if (ret)
+ goto err;
+ }
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err;
+
+ if (dsp->ops->lock_memory) {
+ ret = dsp->ops->lock_memory(dsp, dsp->lock_regions);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Error configuring MPU: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (dsp->ops->start_core) {
+ ret = dsp->ops->start_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ dsp->running = true;
+
+ if (dsp->client_ops->post_run) {
+ ret = dsp->client_ops->post_run(dsp);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err:
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP);
+
+/**
+ * cs_dsp_stop() - Stops the firmware
+ * @dsp: pointer to DSP structure
+ *
+ * Memory will not be disabled so firmware will remain loaded.
+ */
+void cs_dsp_stop(struct cs_dsp *dsp)
+{
+ /* Tell the firmware to cleanup */
+ cs_dsp_signal_event_controls(dsp, CS_DSP_FW_EVENT_SHUTDOWN);
+
+ if (dsp->ops->stop_watchdog)
+ dsp->ops->stop_watchdog(dsp);
+
+ /* Log firmware state; it can be useful for analysis */
+ if (dsp->ops->show_fw_status)
+ dsp->ops->show_fw_status(dsp);
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (dsp->client_ops->pre_stop)
+ dsp->client_ops->pre_stop(dsp);
+
+ dsp->running = false;
+
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ if (dsp->client_ops->post_stop)
+ dsp->client_ops->post_stop(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Execution stopped\n");
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP);
+
+static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET, 0);
+}
+
+static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_EN, 0);
+
+ /* reset halo core with CORE_SOFT_RESET */
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CORE_SOFT_RESET,
+ HALO_CORE_SOFT_RESET_MASK, 1);
+}
+
+/**
+ * cs_dsp_adsp2_init() - Initialise a cs_dsp structure representing an ADSP2 core
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp2_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ switch (dsp->rev) {
+ case 0:
+ /*
+ * Disable the DSP memory by default when in reset for a small
+ * power saving.
+ */
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to clear memory retention: %d\n", ret);
+ return ret;
+ }
+
+ dsp->ops = &cs_dsp_adsp2_ops[0];
+ break;
+ case 1:
+ dsp->ops = &cs_dsp_adsp2_ops[1];
+ break;
+ default:
+ dsp->ops = &cs_dsp_adsp2_ops[2];
+ break;
+ }
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
+
+/**
+ * cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_halo_init(struct cs_dsp *dsp)
+{
+ if (dsp->no_core_startstop)
+ dsp->ops = &cs_dsp_halo_ao_ops;
+ else
+ dsp->ops = &cs_dsp_halo_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP);
+
+/**
+ * cs_dsp_remove() - Clean a cs_dsp before deletion
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_remove(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ while (!list_empty(&dsp->ctl_list)) {
+ ctl = list_first_entry(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+
+ if (dsp->client_ops->control_remove)
+ dsp->client_ops->control_remove(ctl);
+
+ list_del(&ctl->list);
+ cs_dsp_free_ctl_blk(ctl);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP);
+
+/**
+ * cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @num_words: the length of the data to read
+ * @data: a buffer to store the fetched data
+ *
+ * If this is used to read unpacked 24-bit memory, each 24-bit DSP word will
+ * occupy 32 bits in data (the MSbyte will be 0). This padding can be removed
+ * using cs_dsp_remove_padding().
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ unsigned int reg;
+ int ret;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ ret = regmap_raw_read(dsp->regmap, reg, data,
+ sizeof(*data) * num_words);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP);
+
+/**
+ * cs_dsp_read_data_word() - Reads a word from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @data: a buffer to store the fetched data
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data)
+{
+ __be32 raw;
+ int ret;
+
+ ret = cs_dsp_read_raw_data_block(dsp, mem_type, mem_addr, 1, &raw);
+ if (ret < 0)
+ return ret;
+
+ *data = be32_to_cpu(raw) & 0x00ffffffu;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP);
+
+/**
+ * cs_dsp_write_data_word() - Writes a word to DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be written
+ * @mem_addr: the address of the data within the memory region
+ * @data: the data to be written
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ __be32 val = cpu_to_be32(data & 0x00ffffffu);
+ unsigned int reg;
+
+ lockdep_assert_held(&dsp->pwr_lock);
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP);
+
+/**
+ * cs_dsp_remove_padding() - Convert unpacked words to packed bytes
+ * @buf: buffer containing DSP words read from DSP memory
+ * @nwords: number of words to convert
+ *
+ * DSP words from the register map have pad bytes and the data bytes
+ * are in swapped order. This swaps to the native endian order and
+ * strips the pad bytes.
+ */
+void cs_dsp_remove_padding(u32 *buf, int nwords)
+{
+ const __be32 *pack_in = (__be32 *)buf;
+ u8 *pack_out = (u8 *)buf;
+ int i;
+
+ for (i = 0; i < nwords; i++) {
+ u32 word = be32_to_cpu(*pack_in++);
+ *pack_out++ = (u8)word;
+ *pack_out++ = (u8)(word >> 8);
+ *pack_out++ = (u8)(word >> 16);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP);
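+
+/*
+ * Illustrative sketch, not part of this driver: reading a few unpacked
+ * 24-bit words and stripping the pad bytes, as described in the
+ * cs_dsp_read_raw_data_block() kerneldoc. mem_type and mem_addr are
+ * hypothetical values chosen by the caller, which must hold dsp->pwr_lock:
+ *
+ *     __be32 raw[4];
+ *
+ *     mutex_lock(&dsp->pwr_lock);
+ *     ret = cs_dsp_read_raw_data_block(dsp, mem_type, mem_addr, 4, raw);
+ *     mutex_unlock(&dsp->pwr_lock);
+ *     if (!ret)
+ *             cs_dsp_remove_padding((u32 *)raw, 4);  // 4 words -> 12 packed bytes
+ */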
+
+/**
+ * cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ struct regmap *regmap = dsp->regmap;
+ int ret = 0;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Region Lock Ctrl register: %d\n", ret);
+ goto error;
+ }
+
+ if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
+ cs_dsp_err(dsp, "watchdog timeout error\n");
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+ }
+
+ if (val & (ADSP2_ADDR_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
+ if (val & ADSP2_ADDR_ERR_MASK)
+ cs_dsp_err(dsp, "bus error: address error\n");
+ else
+ cs_dsp_err(dsp, "bus error: region lock error\n");
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_BUS_ERR_ADDR, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Bus Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "bus error address = 0x%x\n",
+ val & ADSP2_BUS_ERR_ADDR_MASK);
+
+ ret = regmap_read(regmap,
+ dsp->base + ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
+ &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Pmem Xmem Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "xmem error address = 0x%x\n",
+ val & ADSP2_XMEM_ERR_ADDR_MASK);
+ cs_dsp_err(dsp, "pmem error address = 0x%x\n",
+ (val & ADSP2_PMEM_ERR_ADDR_MASK) >>
+ ADSP2_PMEM_ERR_ADDR_SHIFT);
+ }
+
+ regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
+
+error:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP);
+
+/**
+ * cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int fault[6];
+ struct reg_sequence clear[] = {
+ { dsp->base + HALO_MPU_XM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_YM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_PM_VIO_STATUS, 0x0 },
+ };
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_1,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_1: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: STATUS: 0x%x ADDR: 0x%x\n",
+ *fault & HALO_AHBM_FLAGS_ERR_MASK,
+ (*fault & HALO_AHBM_CORE_ERR_ADDR_MASK) >>
+ HALO_AHBM_CORE_ERR_ADDR_SHIFT);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_0,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_0: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: SYS_ADDR: 0x%x\n", *fault);
+
+ ret = regmap_bulk_read(regmap, dsp->base + HALO_MPU_XM_VIO_ADDR,
+ fault, ARRAY_SIZE(fault));
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read MPU fault info: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "XM: STATUS:0x%x ADDR:0x%x\n", fault[1], fault[0]);
+ cs_dsp_warn(dsp, "YM: STATUS:0x%x ADDR:0x%x\n", fault[3], fault[2]);
+ cs_dsp_warn(dsp, "PM: STATUS:0x%x ADDR:0x%x\n", fault[5], fault[4]);
+
+ ret = regmap_multi_reg_write(dsp->regmap, clear, ARRAY_SIZE(clear));
+ if (ret)
+ cs_dsp_warn(dsp, "Failed to clear MPU status: %d\n", ret);
+
+exit_unlock:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP);
+
+/**
+ * cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
+ * @dsp: pointer to DSP structure
+ *
+ * This is logged for future analysis.
+ */
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
+{
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_warn(dsp, "WDT Expiry Fault\n");
+
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP);
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
+ .validate_version = cs_dsp_validate_version,
+ .parse_sizes = cs_dsp_adsp1_parse_sizes,
+ .region_to_reg = cs_dsp_region_to_reg,
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[] = {
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+
+ .enable_core = cs_dsp_adsp2_enable_core,
+ .disable_core = cs_dsp_adsp2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+ .stop_watchdog = cs_dsp_stop_watchdog,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+};
+
+static const struct cs_dsp_ops cs_dsp_halo_ops = {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_halo_validate_version,
+ .setup_algs = cs_dsp_halo_setup_algs,
+ .region_to_reg = cs_dsp_halo_region_to_reg,
+
+ .show_fw_status = cs_dsp_halo_show_fw_status,
+ .stop_watchdog = cs_dsp_halo_stop_watchdog,
+
+ .lock_memory = cs_dsp_halo_configure_mpu,
+
+ .start_core = cs_dsp_halo_start_core,
+ .stop_core = cs_dsp_halo_stop_core,
+};
+
+static const struct cs_dsp_ops cs_dsp_halo_ao_ops = {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_halo_validate_version,
+ .setup_algs = cs_dsp_halo_setup_algs,
+ .region_to_reg = cs_dsp_halo_region_to_reg,
+ .show_fw_status = cs_dsp_halo_show_fw_status,
+};
+
+/**
+ * cs_dsp_chunk_write() - Format data to a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to write
+ * @val: Value to write
+ *
+ * This function sequentially writes values into the format required for DSP
+ * memory; it handles both inserting the padding bytes and converting to
+ * big endian. Note that data is only committed to the chunk when a whole DSP
+ * word's worth of data is available.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
+{
+ int nwrite, i;
+
+ nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
+
+ ch->cache <<= nwrite;
+ ch->cache |= val >> (nbits - nwrite);
+ ch->cachebits += nwrite;
+ nbits -= nwrite;
+
+ if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache &= 0xFFFFFF;
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ *ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
+
+ ch->bytes += sizeof(ch->cache);
+ ch->cachebits = 0;
+ }
+
+ if (nbits)
+ return cs_dsp_chunk_write(ch, nbits, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP);
+
+/**
+ * cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * As cs_dsp_chunk_write only writes data when a whole DSP word is ready to
+ * be written out, it is possible that some data will remain in the cache; this
+ * function pads that data with zeros up to a whole DSP word and writes it out.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
+{
+ if (!ch->cachebits)
+ return 0;
+
+ return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP);
+
+/**
+ * cs_dsp_chunk_read() - Parse data from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @nbits: Number of bits to read
+ *
+ * This function sequentially reads values from a DSP memory formatted buffer;
+ * it handles both removing the padding bytes and converting from big endian.
+ *
+ * Return: A negative number is returned on error, otherwise the read value.
+ */
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
+{
+ int nread, i;
+ u32 result;
+
+ if (!ch->cachebits) {
+ if (cs_dsp_chunk_end(ch))
+ return -ENOSPC;
+
+ ch->cache = 0;
+ ch->cachebits = CS_DSP_DATA_WORD_BITS;
+
+ for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
+ ch->cache |= *ch->data++;
+
+ ch->bytes += sizeof(ch->cache);
+ }
+
+ nread = min(ch->cachebits, nbits);
+ nbits -= nread;
+
+ result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
+ ch->cache <<= nread;
+ ch->cachebits -= nread;
+
+ if (nbits)
+ result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
+
+ return result;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
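+
+/*
+ * Illustrative sketch, not part of this driver: packing two values into a
+ * DSP-formatted buffer and reading them back with the chunk helpers above.
+ * cs_dsp_chunk() is assumed here to be the chunk initialiser provided by the
+ * public cs_dsp header:
+ *
+ *     u8 buf[8];
+ *     struct cs_dsp_chunk ch = cs_dsp_chunk(buf, sizeof(buf));
+ *
+ *     cs_dsp_chunk_write(&ch, 24, 0x123456);  // one whole DSP word, committed
+ *     cs_dsp_chunk_write(&ch, 4, 0xA);        // partial word, held in the cache
+ *     cs_dsp_chunk_flush(&ch);                // zero-pad and commit the partial word
+ *
+ *     ch = cs_dsp_chunk(buf, sizeof(buf));
+ *     cs_dsp_chunk_read(&ch, 24);             // returns 0x123456
+ *     cs_dsp_chunk_read(&ch, 4);              // returns 0xA
+ */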
+
+MODULE_DESCRIPTION("Cirrus Logic DSP Support");
+MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
new file mode 100644
index 0000000000..5f3a3e913d
--- /dev/null
+++ b/drivers/firmware/dmi-id.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Export SMBIOS/DMI info via sysfs to userspace
+ *
+ * Copyright 2007, Lennart Poettering
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+struct dmi_device_attribute {
+ struct device_attribute dev_attr;
+ int field;
+};
+#define to_dmi_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+
+static ssize_t sys_dmi_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ int field = to_dmi_dev_attr(attr)->field;
+ ssize_t len;
+ len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
+ page[len-1] = '\n';
+ return len;
+}
+
+#define DMI_ATTR(_name, _mode, _show, _field) \
+ { .dev_attr = __ATTR(_name, _mode, _show, NULL), \
+ .field = _field }
+
+#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \
+static struct dmi_device_attribute sys_dmi_##_name##_attr = \
+ DMI_ATTR(_name, _mode, sys_dmi_field_show, _field);
+
+DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
+DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku, 0444, DMI_PRODUCT_SKU);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(board_serial, 0400, DMI_BOARD_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(board_asset_tag, 0444, DMI_BOARD_ASSET_TAG);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_vendor, 0444, DMI_CHASSIS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_type, 0444, DMI_CHASSIS_TYPE);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_version, 0444, DMI_CHASSIS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_serial, 0400, DMI_CHASSIS_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_asset_tag, 0444, DMI_CHASSIS_ASSET_TAG);
+
+static void ascii_filter(char *d, const char *s)
+{
+ /* Filter out characters we don't want to see in the modalias string */
+ for (; *s; s++)
+ if (*s > ' ' && *s < 127 && *s != ':')
+ *(d++) = *s;
+
+ *d = 0;
+}
+
+static ssize_t get_modalias(char *buffer, size_t buffer_size)
+{
+ /*
+ * Note that new fields need to be added at the end to keep compatibility
+ * with udev's hwdb, which matches on "`cat dmi/id/modalias`*".
+ */
+ static const struct mafield {
+ const char *prefix;
+ int field;
+ } fields[] = {
+ { "bvn", DMI_BIOS_VENDOR },
+ { "bvr", DMI_BIOS_VERSION },
+ { "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
+ { "svn", DMI_SYS_VENDOR },
+ { "pn", DMI_PRODUCT_NAME },
+ { "pvr", DMI_PRODUCT_VERSION },
+ { "rvn", DMI_BOARD_VENDOR },
+ { "rn", DMI_BOARD_NAME },
+ { "rvr", DMI_BOARD_VERSION },
+ { "cvn", DMI_CHASSIS_VENDOR },
+ { "ct", DMI_CHASSIS_TYPE },
+ { "cvr", DMI_CHASSIS_VERSION },
+ { "sku", DMI_PRODUCT_SKU },
+ { NULL, DMI_NONE }
+ };
+
+ ssize_t l, left;
+ char *p;
+ const struct mafield *f;
+
+ strcpy(buffer, "dmi");
+ p = buffer + 3; left = buffer_size - 4;
+
+ for (f = fields; f->prefix && left > 0; f++) {
+ const char *c;
+ char *t;
+
+ c = dmi_get_system_info(f->field);
+ if (!c)
+ continue;
+
+ t = kmalloc(strlen(c) + 1, GFP_KERNEL);
+ if (!t)
+ break;
+ ascii_filter(t, c);
+ l = scnprintf(p, left, ":%s%s", f->prefix, t);
+ kfree(t);
+
+ p += l;
+ left -= l;
+ }
+
+ p[0] = ':';
+ p[1] = 0;
+
+ return p - buffer + 1;
+}
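+
+/*
+ * For illustration only: on a hypothetical system the resulting modalias
+ * (a single line, wrapped here for readability) might look like
+ *
+ *     dmi:bvnExampleVendor:bvr1.23:bd01/02/2014:svnExampleInc:pnExampleBox:
+ *         pvr1.0:rvnExampleInc:rnEB1:rvr1.0:cvnExampleInc:ct10:cvr1.0:sku42:
+ *
+ * where the prefixes come from the fields[] table above and fields whose
+ * DMI strings are absent are simply omitted.
+ */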
+
+static ssize_t sys_dmi_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ ssize_t r;
+ r = get_modalias(page, PAGE_SIZE-1);
+ page[r] = '\n';
+ page[r+1] = 0;
+ return r+1;
+}
+
+static struct device_attribute sys_dmi_modalias_attr =
+ __ATTR(modalias, 0444, sys_dmi_modalias_show, NULL);
+
+static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
+
+static struct attribute_group sys_dmi_attribute_group = {
+ .attrs = sys_dmi_attributes,
+};
+
+static const struct attribute_group *sys_dmi_attribute_groups[] = {
+ &sys_dmi_attribute_group,
+ NULL
+};
+
+static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ ssize_t len;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+ len = get_modalias(&env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len >= (sizeof(env->buf) - env->buflen))
+ return -ENOMEM;
+ env->buflen += len;
+ return 0;
+}
+
+static struct class dmi_class = {
+ .name = "dmi",
+ .dev_release = (void(*)(struct device *)) kfree,
+ .dev_uevent = dmi_dev_uevent,
+};
+
+static struct device *dmi_dev;
+
+/* Initialization */
+
+#define ADD_DMI_ATTR(_name, _field) \
+ if (dmi_get_system_info(_field)) \
+ sys_dmi_attributes[i++] = &sys_dmi_##_name##_attr.dev_attr.attr;
+
+/* In a separate function to keep gcc 3.2 happy - do NOT merge this into
+ dmi_id_init! */
+static void __init dmi_id_init_attr_table(void)
+{
+ int i;
+
+ /* Not all DMI fields are necessarily available on all
+ * systems, hence build an attribute table of just
+ * what's available */
+ i = 0;
+ ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
+ ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
+ ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
+ ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
+ ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
+ ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
+ ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
+ ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
+ ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
+ ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
+ ADD_DMI_ATTR(product_sku, DMI_PRODUCT_SKU);
+ ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
+ ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
+ ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+ ADD_DMI_ATTR(board_serial, DMI_BOARD_SERIAL);
+ ADD_DMI_ATTR(board_asset_tag, DMI_BOARD_ASSET_TAG);
+ ADD_DMI_ATTR(chassis_vendor, DMI_CHASSIS_VENDOR);
+ ADD_DMI_ATTR(chassis_type, DMI_CHASSIS_TYPE);
+ ADD_DMI_ATTR(chassis_version, DMI_CHASSIS_VERSION);
+ ADD_DMI_ATTR(chassis_serial, DMI_CHASSIS_SERIAL);
+ ADD_DMI_ATTR(chassis_asset_tag, DMI_CHASSIS_ASSET_TAG);
+ sys_dmi_attributes[i++] = &sys_dmi_modalias_attr.attr;
+}
+
+static int __init dmi_id_init(void)
+{
+ int ret;
+
+ if (!dmi_available)
+ return -ENODEV;
+
+ dmi_id_init_attr_table();
+
+ ret = class_register(&dmi_class);
+ if (ret)
+ return ret;
+
+ dmi_dev = kzalloc(sizeof(*dmi_dev), GFP_KERNEL);
+ if (!dmi_dev) {
+ ret = -ENOMEM;
+ goto fail_class_unregister;
+ }
+
+ dmi_dev->class = &dmi_class;
+ dev_set_name(dmi_dev, "id");
+ dmi_dev->groups = sys_dmi_attribute_groups;
+
+ ret = device_register(dmi_dev);
+ if (ret)
+ goto fail_put_dmi_dev;
+
+ return 0;
+
+fail_put_dmi_dev:
+ put_device(dmi_dev);
+
+fail_class_unregister:
+ class_unregister(&dmi_class);
+
+ return ret;
+}
+
+arch_initcall(dmi_id_init);
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
new file mode 100644
index 0000000000..8d91997036
--- /dev/null
+++ b/drivers/firmware/dmi-sysfs.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dmi-sysfs.c
+ *
+ * This module exports the DMI tables read-only to userspace through the
+ * sysfs file system.
+ *
+ * Data is currently found below
+ * /sys/firmware/dmi/...
+ *
+ * DMI attributes are presented in attribute files with names
+ * formatted using %d-%d, so that the first integer indicates the
+ * structure type (0-255), and the second field is the instance of that
+ * entry.
+ *
+ * Copyright 2011 Google, Inc.
+ */
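+
+/*
+ * For example, on a system whose SMBIOS tables contain a System Event Log
+ * (type 15) entry, this module would expose files such as
+ *
+ *     /sys/firmware/dmi/entries/15-0/type
+ *     /sys/firmware/dmi/entries/15-0/raw
+ *     /sys/firmware/dmi/entries/15-0/system_event_log/raw_event_log
+ *
+ * where the directory name is <structure type>-<instance>.
+ */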
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/dmi.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <asm/dmi.h>
+
+#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but the entry
+ type field is only 8 bits, so 255 is the
+ maximum possible value */
+
+struct dmi_sysfs_entry {
+ struct dmi_header dh;
+ struct kobject kobj;
+ int instance;
+ int position;
+ struct list_head list;
+ struct kobject *child;
+};
+
+/*
+ * Global list of dmi_sysfs_entry. Even though this should only be
+ * manipulated at setup and teardown, the lazy nature of the kobject
+ * system means we get lazy removes.
+ */
+static LIST_HEAD(entry_list);
+static DEFINE_SPINLOCK(entry_list_lock);
+
+/* dmi_sysfs_attribute - Top level attribute, used by all entries. */
+struct dmi_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
+};
+
+#define DMI_SYSFS_ATTR(_entry, _name) \
+struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0400}, \
+ .show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*
+ * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
+ * mapped in. Use in conjunction with dmi_sysfs_specialize_attr_ops.
+ */
+struct dmi_sysfs_mapped_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ char *buf);
+};
+
+#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
+struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0400}, \
+ .show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+static void dmi_entry_free(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct dmi_sysfs_entry, kobj);
+}
+
+static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct dmi_sysfs_attribute, attr);
+}
+
+static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *_attr, char *buf)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+ struct dmi_sysfs_attribute *attr = to_attr(_attr);
+
+ /* DMI stuff is only ever admin visible */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops dmi_sysfs_attr_ops = {
+ .show = dmi_sysfs_attr_show,
+};
+
+typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
+ const struct dmi_header *dh, void *);
+
+struct find_dmi_data {
+ struct dmi_sysfs_entry *entry;
+ dmi_callback callback;
+ void *private;
+ int instance_countdown;
+ ssize_t ret;
+};
+
+static void find_dmi_entry_helper(const struct dmi_header *dh,
+ void *_data)
+{
+ struct find_dmi_data *data = _data;
+ struct dmi_sysfs_entry *entry = data->entry;
+
+ /* Is this the entry we want? */
+ if (dh->type != entry->dh.type)
+ return;
+
+ if (data->instance_countdown != 0) {
+ /* try the next instance? */
+ data->instance_countdown--;
+ return;
+ }
+
+ /*
+ * Don't ever revisit the instance. Short circuit later
+ * instances by letting the instance_countdown run negative
+ */
+ data->instance_countdown--;
+
+ /* Found the entry */
+ data->ret = data->callback(entry, dh, data->private);
+}
+
+/* State for passing the read parameters through dmi_find_entry() */
+struct dmi_read_state {
+ char *buf;
+ loff_t pos;
+ size_t count;
+};
+
+static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
+ dmi_callback callback, void *private)
+{
+ struct find_dmi_data data = {
+ .entry = entry,
+ .callback = callback,
+ .private = private,
+ .instance_countdown = entry->instance,
+ .ret = -EIO, /* To signal the entry disappeared */
+ };
+ int ret;
+
+ ret = dmi_walk(find_dmi_entry_helper, &data);
+ /* This shouldn't happen, but just in case. */
+ if (ret)
+ return -EINVAL;
+ return data.ret;
+}
+
+/*
+ * Calculate and return the byte length of the dmi entry identified by
+ * dh. This includes both the formatted portion as well as the
+ * unformatted string space, including the two trailing nul characters.
+ */
+static size_t dmi_entry_length(const struct dmi_header *dh)
+{
+ const char *p = (const char *)dh;
+
+ p += dh->length;
+
+ while (p[0] || p[1])
+ p++;
+
+ return 2 + p - (const char *)dh;
+}
+
+/*************************************************
+ * Support bits for specialized DMI entry support
+ *************************************************/
+struct dmi_entry_attr_show_data {
+ struct attribute *attr;
+ char *buf;
+};
+
+static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_data)
+{
+ struct dmi_entry_attr_show_data *data = _data;
+ struct dmi_sysfs_mapped_attribute *attr;
+
+ attr = container_of(data->attr,
+ struct dmi_sysfs_mapped_attribute, attr);
+ return attr->show(entry, dh, data->buf);
+}
+
+static ssize_t dmi_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct dmi_entry_attr_show_data data = {
+ .attr = attr,
+ .buf = buf,
+ };
+ /* Find the entry according to our parent and call the
+ * normalized show method hanging off of the attribute */
+ return find_dmi_entry(to_entry(kobj->parent),
+ dmi_entry_attr_show_helper, &data);
+}
+
+static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
+ .show = dmi_entry_attr_show,
+};
+
+/*************************************************
+ * Specialized DMI entry support.
+ *************************************************/
+
+/*** Type 15 - System Event Log ***/
+
+#define DMI_SEL_ACCESS_METHOD_IO8 0x00
+#define DMI_SEL_ACCESS_METHOD_IO2x8 0x01
+#define DMI_SEL_ACCESS_METHOD_IO16 0x02
+#define DMI_SEL_ACCESS_METHOD_PHYS32 0x03
+#define DMI_SEL_ACCESS_METHOD_GPNV 0x04
+
+struct dmi_system_event_log {
+ struct dmi_header header;
+ u16 area_length;
+ u16 header_start_offset;
+ u16 data_start_offset;
+ u8 access_method;
+ u8 status;
+ u32 change_token;
+ union {
+ struct {
+ u16 index_addr;
+ u16 data_addr;
+ } io;
+ u32 phys_addr32;
+ u16 gpnv_handle;
+ u32 access_method_address;
+ };
+ u8 header_format;
+ u8 type_descriptors_supported_count;
+ u8 per_log_type_descriptor_length;
+ u8 supported_log_type_descriptors[];
+} __packed;
+
+#define DMI_SYSFS_SEL_FIELD(_field) \
+static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
+ const struct dmi_header *dh, \
+ char *buf) \
+{ \
+ struct dmi_system_event_log sel; \
+ if (sizeof(sel) > dmi_entry_length(dh)) \
+ return -EIO; \
+ memcpy(&sel, dh, sizeof(sel)); \
+ return sprintf(buf, "%u\n", sel._field); \
+} \
+static DMI_SYSFS_MAPPED_ATTR(sel, _field)
+
+DMI_SYSFS_SEL_FIELD(area_length);
+DMI_SYSFS_SEL_FIELD(header_start_offset);
+DMI_SYSFS_SEL_FIELD(data_start_offset);
+DMI_SYSFS_SEL_FIELD(access_method);
+DMI_SYSFS_SEL_FIELD(status);
+DMI_SYSFS_SEL_FIELD(change_token);
+DMI_SYSFS_SEL_FIELD(access_method_address);
+DMI_SYSFS_SEL_FIELD(header_format);
+DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
+DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
+
+static struct attribute *dmi_sysfs_sel_attrs[] = {
+ &dmi_sysfs_attr_sel_area_length.attr,
+ &dmi_sysfs_attr_sel_header_start_offset.attr,
+ &dmi_sysfs_attr_sel_data_start_offset.attr,
+ &dmi_sysfs_attr_sel_access_method.attr,
+ &dmi_sysfs_attr_sel_status.attr,
+ &dmi_sysfs_attr_sel_change_token.attr,
+ &dmi_sysfs_attr_sel_access_method_address.attr,
+ &dmi_sysfs_attr_sel_header_format.attr,
+ &dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
+ &dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dmi_sysfs_sel);
+
+static const struct kobj_type dmi_system_event_log_ktype = {
+ .release = dmi_entry_free,
+ .sysfs_ops = &dmi_sysfs_specialize_attr_ops,
+ .default_groups = dmi_sysfs_sel_groups,
+};
+
+#ifdef CONFIG_HAS_IOPORT
+typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
+ loff_t offset);
+
+static DEFINE_MUTEX(io_port_lock);
+
+static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outb((u8)offset, sel->io.index_addr);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outb((u8)offset, sel->io.index_addr);
+ outb((u8)(offset >> 8), sel->io.index_addr + 1);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outw((u16)offset, sel->io.index_addr);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static sel_io_reader sel_io_readers[] = {
+ [DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io,
+ [DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io,
+ [DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io,
+};
+
+static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
+ const struct dmi_system_event_log *sel,
+ char *buf, loff_t pos, size_t count)
+{
+ ssize_t wrote = 0;
+
+ sel_io_reader io_reader = sel_io_readers[sel->access_method];
+
+ while (count && pos < sel->area_length) {
+ count--;
+ *(buf++) = io_reader(sel, pos++);
+ wrote++;
+ }
+
+ return wrote;
+}
+#endif
+
+static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
+ const struct dmi_system_event_log *sel,
+ char *buf, loff_t pos, size_t count)
+{
+ u8 __iomem *mapped;
+ ssize_t wrote = 0;
+
+ mapped = dmi_remap(sel->access_method_address, sel->area_length);
+ if (!mapped)
+ return -EIO;
+
+ while (count && pos < sel->area_length) {
+ count--;
+ *(buf++) = readb(mapped + pos++);
+ wrote++;
+ }
+
+ dmi_unmap(mapped);
+ return wrote;
+}
+
+static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_state)
+{
+ struct dmi_read_state *state = _state;
+ struct dmi_system_event_log sel;
+
+ if (sizeof(sel) > dmi_entry_length(dh))
+ return -EIO;
+
+ memcpy(&sel, dh, sizeof(sel));
+
+ switch (sel.access_method) {
+#ifdef CONFIG_HAS_IOPORT
+ case DMI_SEL_ACCESS_METHOD_IO8:
+ case DMI_SEL_ACCESS_METHOD_IO2x8:
+ case DMI_SEL_ACCESS_METHOD_IO16:
+ return dmi_sel_raw_read_io(entry, &sel, state->buf,
+ state->pos, state->count);
+#endif
+ case DMI_SEL_ACCESS_METHOD_PHYS32:
+ return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
+ state->pos, state->count);
+ case DMI_SEL_ACCESS_METHOD_GPNV:
+ pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
+ return -EIO;
+ default:
+ pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
+ sel.access_method);
+ return -EIO;
+ }
+}
+
+static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
+ struct dmi_read_state state = {
+ .buf = buf,
+ .pos = pos,
+ .count = count,
+ };
+
+ return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
+}
+
+static struct bin_attribute dmi_sel_raw_attr = {
+ .attr = {.name = "raw_event_log", .mode = 0400},
+ .read = dmi_sel_raw_read,
+};
+
+static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
+{
+ int ret;
+
+ entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
+ if (!entry->child)
+ return -ENOMEM;
+ ret = kobject_init_and_add(entry->child,
+ &dmi_system_event_log_ktype,
+ &entry->kobj,
+ "system_event_log");
+ if (ret)
+ goto out_free;
+
+ ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+ if (ret)
+ goto out_del;
+
+ return 0;
+
+out_del:
+ kobject_del(entry->child);
+out_free:
+ kfree(entry->child);
+ return ret;
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+
+static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.length);
+}
+
+static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.handle);
+}
+
+static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.type);
+}
+
+static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", entry->instance);
+}
+
+static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", entry->position);
+}
+
+static DMI_SYSFS_ATTR(entry, length);
+static DMI_SYSFS_ATTR(entry, handle);
+static DMI_SYSFS_ATTR(entry, type);
+static DMI_SYSFS_ATTR(entry, instance);
+static DMI_SYSFS_ATTR(entry, position);
+
+static struct attribute *dmi_sysfs_entry_attrs[] = {
+ &dmi_sysfs_attr_entry_length.attr,
+ &dmi_sysfs_attr_entry_handle.attr,
+ &dmi_sysfs_attr_entry_type.attr,
+ &dmi_sysfs_attr_entry_instance.attr,
+ &dmi_sysfs_attr_entry_position.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dmi_sysfs_entry);
+
+static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_state)
+{
+ struct dmi_read_state *state = _state;
+ size_t entry_length;
+
+ entry_length = dmi_entry_length(dh);
+
+ return memory_read_from_buffer(state->buf, state->count,
+ &state->pos, dh, entry_length);
+}
+
+static ssize_t dmi_entry_raw_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+ struct dmi_read_state state = {
+ .buf = buf,
+ .pos = pos,
+ .count = count,
+ };
+
+ return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
+}
+
+static const struct bin_attribute dmi_entry_raw_attr = {
+ .attr = {.name = "raw", .mode = 0400},
+ .read = dmi_entry_raw_read,
+};
+
+static void dmi_sysfs_entry_release(struct kobject *kobj)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+
+ spin_lock(&entry_list_lock);
+ list_del(&entry->list);
+ spin_unlock(&entry_list_lock);
+ kfree(entry);
+}
+
+static const struct kobj_type dmi_sysfs_entry_ktype = {
+ .release = dmi_sysfs_entry_release,
+ .sysfs_ops = &dmi_sysfs_attr_ops,
+ .default_groups = dmi_sysfs_entry_groups,
+};
+
+static struct kset *dmi_kset;
+
+/* Global count of all instances seen. Only for setup */
+static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
+
+/* Global positional count of all entries seen. Only for setup */
+static int __initdata position_count;
+
+static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
+ void *_ret)
+{
+ struct dmi_sysfs_entry *entry;
+ int *ret = _ret;
+
+ /* If a previous entry saw an error, short circuit */
+ if (*ret)
+ return;
+
+ /* Allocate and register a new entry into the entries set */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ *ret = -ENOMEM;
+ return;
+ }
+
+ /* Set the key */
+ memcpy(&entry->dh, dh, sizeof(*dh));
+ entry->instance = instance_counts[dh->type]++;
+ entry->position = position_count++;
+
+ entry->kobj.kset = dmi_kset;
+ *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
+ "%d-%d", dh->type, entry->instance);
+
+ /* Thread on the global list for cleanup */
+ spin_lock(&entry_list_lock);
+ list_add_tail(&entry->list, &entry_list);
+ spin_unlock(&entry_list_lock);
+
+ if (*ret) {
+ kobject_put(&entry->kobj);
+ return;
+ }
+
+ /* Handle specializations by type */
+ switch (dh->type) {
+ case DMI_ENTRY_SYSTEM_EVENT_LOG:
+ *ret = dmi_system_event_log(entry);
+ break;
+ default:
+ /* No specialization */
+ break;
+ }
+ if (*ret)
+ goto out_err;
+
+ /* Create the raw binary file to access the entry */
+ *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+ if (*ret)
+ goto out_err;
+
+ return;
+out_err:
+ kobject_put(entry->child);
+ kobject_put(&entry->kobj);
+ return;
+}
+
+static void cleanup_entry_list(void)
+{
+ struct dmi_sysfs_entry *entry, *next;
+
+ /* No locks, we are on our way out */
+ list_for_each_entry_safe(entry, next, &entry_list, list) {
+ kobject_put(entry->child);
+ kobject_put(&entry->kobj);
+ }
+}
+
+static int __init dmi_sysfs_init(void)
+{
+ int error;
+ int val;
+
+ if (!dmi_kobj) {
+ pr_debug("dmi-sysfs: dmi entry is absent.\n");
+ error = -ENODATA;
+ goto err;
+ }
+
+ dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
+ if (!dmi_kset) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ val = 0;
+ error = dmi_walk(dmi_sysfs_register_handle, &val);
+ if (error)
+ goto err;
+ if (val) {
+ error = val;
+ goto err;
+ }
+
+ pr_debug("dmi-sysfs: loaded.\n");
+
+ return 0;
+err:
+ cleanup_entry_list();
+ kset_unregister(dmi_kset);
+ return error;
+}
+
+/* clean up everything. */
+static void __exit dmi_sysfs_exit(void)
+{
+ pr_debug("dmi-sysfs: unloading.\n");
+ cleanup_entry_list();
+ kset_unregister(dmi_kset);
+}
+
+module_init(dmi_sysfs_init);
+module_exit(dmi_sysfs_exit);
+
+MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
+MODULE_DESCRIPTION("DMI sysfs support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
new file mode 100644
index 0000000000..015c95a825
--- /dev/null
+++ b/drivers/firmware/dmi_scan.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/random.h>
+#include <asm/dmi.h>
+#include <asm/unaligned.h>
+
+#ifndef SMBIOS_ENTRY_POINT_SCAN_START
+#define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000
+#endif
+
+struct kobject *dmi_kobj;
+EXPORT_SYMBOL_GPL(dmi_kobj);
+
+/*
+ * DMI stands for "Desktop Management Interface". It is part
+ * of, and an antecedent to, SMBIOS, which stands for System
+ * Management BIOS. See further: https://www.dmtf.org/standards
+ */
+static const char dmi_empty_string[] = "";
+
+static u32 dmi_ver __initdata;
+static u32 dmi_len;
+static u16 dmi_num;
+static u8 smbios_entry_point[32];
+static int smbios_entry_point_size;
+
+/* DMI system identification string used during boot */
+static char dmi_ids_string[128] __initdata;
+
+static struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u64 size; /* bytes */
+ u16 handle;
+ u8 type; /* DDR2, DDR3, DDR4 etc */
+} *dmi_memdev;
+static int dmi_memdev_nr;
+
+static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
+{
+ const u8 *bp = ((u8 *) dm) + dm->length;
+ const u8 *nsp;
+
+ if (s) {
+ while (--s > 0 && *bp)
+ bp += strlen(bp) + 1;
+
+ /* Strings containing only spaces are considered empty */
+ nsp = bp;
+ while (*nsp == ' ')
+ nsp++;
+ if (*nsp != '\0')
+ return bp;
+ }
+
+ return dmi_empty_string;
+}
+
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
+{
+ const char *bp = dmi_string_nosave(dm, s);
+ char *str;
+ size_t len;
+
+ if (bp == dmi_empty_string)
+ return dmi_empty_string;
+
+ len = strlen(bp) + 1;
+ str = dmi_alloc(len);
+ if (str != NULL)
+ strcpy(str, bp);
+
+ return str;
+}
+
+/*
+ * We have to be cautious here. We have seen BIOSes with DMI pointers
+ * pointing to completely the wrong place, for example.
+ */
+static void dmi_decode_table(u8 *buf,
+ void (*decode)(const struct dmi_header *, void *),
+ void *private_data)
+{
+ u8 *data = buf;
+ int i = 0;
+
+ /*
+ * Stop when we have seen all the items the table claimed to have
+ * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
+ * >= 3.0 only) OR we run off the end of the table (should never
+ * happen but sometimes does on bogus implementations.)
+ */
+ while ((!dmi_num || i < dmi_num) &&
+ (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ const struct dmi_header *dm = (const struct dmi_header *)data;
+
+ /*
+ * We want to know the total length (formatted area and
+ * strings) before decoding to make sure we won't run off the
+ * table in dmi_decode or dmi_string
+ */
+ data += dm->length;
+ while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
+ data++;
+ if (data - buf < dmi_len - 1)
+ decode(dm, private_data);
+
+ data += 2;
+ i++;
+
+ /*
+ * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+ * For tables behind a 64-bit entry point, we have no item
+ * count and no exact table length, so stop on end-of-table
+ * marker. For tables behind a 32-bit entry point, we have
+ * seen OEM structures behind the end-of-table marker on
+ * some systems, so don't trust it.
+ */
+ if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
+ break;
+ }
+
+ /* Trim DMI table length if needed */
+ if (dmi_len > data - buf)
+ dmi_len = data - buf;
+}
+
+static phys_addr_t dmi_base;
+
+static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+ void *))
+{
+ u8 *buf;
+ u32 orig_dmi_len = dmi_len;
+
+ buf = dmi_early_remap(dmi_base, orig_dmi_len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ dmi_decode_table(buf, decode, NULL);
+
+ add_device_randomness(buf, dmi_len);
+
+ dmi_early_unmap(buf, orig_dmi_len);
+ return 0;
+}
+
+static int __init dmi_checksum(const u8 *buf, u8 len)
+{
+ u8 sum = 0;
+ int a;
+
+ for (a = 0; a < len; a++)
+ sum += buf[a];
+
+ return sum == 0;
+}
+
+static const char *dmi_ident[DMI_STRING_MAX];
+static LIST_HEAD(dmi_devices);
+int dmi_available;
+EXPORT_SYMBOL_GPL(dmi_available);
+
+/*
+ * Save a DMI string
+ */
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+ int string)
+{
+ const char *d = (const char *) dm;
+ const char *p;
+
+ if (dmi_ident[slot] || dm->length <= string)
+ return;
+
+ p = dmi_string(dm, d[string]);
+ if (p == NULL)
+ return;
+
+ dmi_ident[slot] = p;
+}
+
+static void __init dmi_save_release(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *minor, *major;
+ char *s;
+
+ /* If the table doesn't have the field, let's return */
+ if (dmi_ident[slot] || dm->length < index)
+ return;
+
+ minor = (u8 *) dm + index;
+ major = (u8 *) dm + index - 1;
+
+ /* As per the spec, if the system doesn't support this field,
+ * the value is 0xFF
+ */
+ if (*major == 0xFF && *minor == 0xFF)
+ return;
+
+ s = dmi_alloc(8);
+ if (!s)
+ return;
+
+ sprintf(s, "%u.%u", *major, *minor);
+
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *d;
+ char *s;
+ int is_ff = 1, is_00 = 1, i;
+
+ if (dmi_ident[slot] || dm->length < index + 16)
+ return;
+
+ d = (u8 *) dm + index;
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+ if (d[i] != 0x00)
+ is_00 = 0;
+ if (d[i] != 0xFF)
+ is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+ return;
+
+ s = dmi_alloc(16*2+4+1);
+ if (!s)
+ return;
+
+ /*
+ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+ * the UUID are supposed to be little-endian encoded. The specification
+ * says that this is the de facto standard.
+ */
+ if (dmi_ver >= 0x020600)
+ sprintf(s, "%pUl", d);
+ else
+ sprintf(s, "%pUb", d);
+
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *d;
+ char *s;
+
+ if (dmi_ident[slot] || dm->length <= index)
+ return;
+
+ s = dmi_alloc(4);
+ if (!s)
+ return;
+
+ d = (u8 *) dm + index;
+ sprintf(s, "%u", *d & 0x7F);
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_one_device(int type, const char *name)
+{
+ struct dmi_device *dev;
+
+ /* No duplicate device */
+ if (dmi_find_device(type, name, NULL))
+ return;
+
+ dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+ if (!dev)
+ return;
+
+ dev->type = type;
+ strcpy((char *)(dev + 1), name);
+ dev->name = (char *)(dev + 1);
+ dev->device_data = NULL;
+ list_add(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_devices(const struct dmi_header *dm)
+{
+ int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
+
+ for (i = 0; i < count; i++) {
+ const char *d = (char *)(dm + 1) + (i * 2);
+
+ /* Skip disabled device */
+ if ((*d & 0x80) == 0)
+ continue;
+
+ dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
+ }
+}
+
+static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+{
+ int i, count;
+ struct dmi_device *dev;
+
+ if (dm->length < 0x05)
+ return;
+
+ count = *(u8 *)(dm + 1);
+ for (i = 1; i <= count; i++) {
+ const char *devname = dmi_string(dm, i);
+
+ if (devname == dmi_empty_string)
+ continue;
+
+ dev = dmi_alloc(sizeof(*dev));
+ if (!dev)
+ break;
+
+ dev->type = DMI_DEV_TYPE_OEM_STRING;
+ dev->name = devname;
+ dev->device_data = NULL;
+
+ list_add(&dev->list, &dmi_devices);
+ }
+}
+
+static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
+{
+ struct dmi_device *dev;
+ void *data;
+
+ data = dmi_alloc(dm->length);
+ if (data == NULL)
+ return;
+
+ memcpy(data, dm, dm->length);
+
+ dev = dmi_alloc(sizeof(*dev));
+ if (!dev)
+ return;
+
+ dev->type = DMI_DEV_TYPE_IPMI;
+ dev->name = "IPMI controller";
+ dev->device_data = data;
+
+ list_add_tail(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_dev_pciaddr(int instance, int segment, int bus,
+ int devfn, const char *name, int type)
+{
+ struct dmi_dev_onboard *dev;
+
+ /* Ignore invalid values */
+ if (type == DMI_DEV_TYPE_DEV_SLOT &&
+ segment == 0xFFFF && bus == 0xFF && devfn == 0xFF)
+ return;
+
+ dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+ if (!dev)
+ return;
+
+ dev->instance = instance;
+ dev->segment = segment;
+ dev->bus = bus;
+ dev->devfn = devfn;
+
+ strcpy((char *)&dev[1], name);
+ dev->dev.type = type;
+ dev->dev.name = (char *)&dev[1];
+ dev->dev.device_data = dev;
+
+ list_add(&dev->dev.list, &dmi_devices);
+}
+
+static void __init dmi_save_extended_devices(const struct dmi_header *dm)
+{
+ const char *name;
+ const u8 *d = (u8 *)dm;
+
+ if (dm->length < 0x0B)
+ return;
+
+ /* Skip disabled device */
+ if ((d[0x5] & 0x80) == 0)
+ return;
+
+ name = dmi_string_nosave(dm, d[0x4]);
+ dmi_save_dev_pciaddr(d[0x6], *(u16 *)(d + 0x7), d[0x9], d[0xA], name,
+ DMI_DEV_TYPE_DEV_ONBOARD);
+ dmi_save_one_device(d[0x5] & 0x7f, name);
+}
+
+static void __init dmi_save_system_slot(const struct dmi_header *dm)
+{
+ const u8 *d = (u8 *)dm;
+
+ /* Need SMBIOS 2.6+ structure */
+ if (dm->length < 0x11)
+ return;
+ dmi_save_dev_pciaddr(*(u16 *)(d + 0x9), *(u16 *)(d + 0xD), d[0xF],
+ d[0x10], dmi_string_nosave(dm, d[0x4]),
+ DMI_DEV_TYPE_DEV_SLOT);
+}
+
+static void __init count_mem_devices(const struct dmi_header *dm, void *v)
+{
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ dmi_memdev_nr++;
+}
+
+static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+{
+ const char *d = (const char *)dm;
+ static int nr;
+ u64 bytes;
+ u16 size;
+
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
+ return;
+ if (nr >= dmi_memdev_nr) {
+ pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
+ return;
+ }
+ dmi_memdev[nr].handle = get_unaligned(&dm->handle);
+ dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
+ dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ dmi_memdev[nr].type = d[0x12];
+
+ size = get_unaligned((u16 *)&d[0xC]);
+ if (size == 0)
+ bytes = 0;
+ else if (size == 0xffff)
+ bytes = ~0ull;
+ else if (size & 0x8000)
+ bytes = (u64)(size & 0x7fff) << 10;
+ else if (size != 0x7fff || dm->length < 0x20)
+ bytes = (u64)size << 20;
+ else
+ bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
+
+ dmi_memdev[nr].size = bytes;
+ nr++;
+}
+
+static void __init dmi_memdev_walk(void)
+{
+ if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
+ dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
+ if (dmi_memdev)
+ dmi_walk_early(save_mem_devices);
+ }
+}
+
+/*
+ * Process a DMI table entry. Right now all we care about are the BIOS
+ * and machine entries. For 2.5 we should pull the smbus controller info
+ * out of here.
+ */
+static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0: /* BIOS Information */
+ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
+ dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
+ dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
+ dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
+ break;
+ case 1: /* System Information */
+ dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
+ dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
+ dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
+ dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
+ dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+ dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
+ dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
+ break;
+ case 2: /* Base Board Information */
+ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+ dmi_save_ident(dm, DMI_BOARD_NAME, 5);
+ dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+ dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
+ dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
+ break;
+ case 3: /* Chassis Information */
+ dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
+ dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
+ dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
+ dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
+ dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
+ break;
+ case 9: /* System Slots */
+ dmi_save_system_slot(dm);
+ break;
+ case 10: /* Onboard Devices Information */
+ dmi_save_devices(dm);
+ break;
+ case 11: /* OEM Strings */
+ dmi_save_oem_strings_devices(dm);
+ break;
+ case 38: /* IPMI Device Information */
+ dmi_save_ipmi_device(dm);
+ break;
+ case 41: /* Onboard Devices Extended Information */
+ dmi_save_extended_devices(dm);
+ }
+}
+
+static int __init print_filtered(char *buf, size_t len, const char *info)
+{
+ int c = 0;
+ const char *p;
+
+ if (!info)
+ return c;
+
+ for (p = info; *p; p++)
+ if (isprint(*p))
+ c += scnprintf(buf + c, len - c, "%c", *p);
+ else
+ c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
+ return c;
+}
+
+static void __init dmi_format_ids(char *buf, size_t len)
+{
+ int c = 0;
+ const char *board; /* Board Name is optional */
+
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ c += scnprintf(buf + c, len - c, " ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_PRODUCT_NAME));
+
+ board = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board) {
+ c += scnprintf(buf + c, len - c, "/");
+ c += print_filtered(buf + c, len - c, board);
+ }
+ c += scnprintf(buf + c, len - c, ", BIOS ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_BIOS_VERSION));
+ c += scnprintf(buf + c, len - c, " ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_BIOS_DATE));
+}
+
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image. Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0. Otherwise return 1.
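+ *
+ * An illustrative view of the scanned 32 byte buffer (offsets as
+ * used in the checks below):
+ *
+ *   offset  0: "_SM_"  SMBIOS 2.x entry point (optional)
+ *   offset 16: "_DMI_" legacy DMI header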
+ */
+static int __init dmi_present(const u8 *buf)
+{
+ u32 smbios_ver;
+
+ /*
+ * The size of this structure is 31 bytes, but we also accept value
+ * 30 due to a mistake in SMBIOS specification version 2.1.
+ */
+ if (memcmp(buf, "_SM_", 4) == 0 &&
+ buf[5] >= 30 && buf[5] <= 32 &&
+ dmi_checksum(buf, buf[5])) {
+ smbios_ver = get_unaligned_be16(buf + 6);
+ smbios_entry_point_size = buf[5];
+ memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+ /* Some BIOSes report a weird SMBIOS version, fix that up */
+ switch (smbios_ver) {
+ case 0x021F:
+ case 0x0221:
+ pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
+ smbios_ver & 0xFF, 3);
+ smbios_ver = 0x0203;
+ break;
+ case 0x0233:
+ pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
+ smbios_ver = 0x0206;
+ break;
+ }
+ } else {
+ smbios_ver = 0;
+ }
+
+ buf += 16;
+
+ if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
+ if (smbios_ver)
+ dmi_ver = smbios_ver;
+ else
+ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
+ dmi_ver <<= 8;
+ dmi_num = get_unaligned_le16(buf + 12);
+ dmi_len = get_unaligned_le16(buf + 6);
+ dmi_base = get_unaligned_le32(buf + 8);
+
+ if (dmi_walk_early(dmi_decode) == 0) {
+ if (smbios_ver) {
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+ } else {
+ smbios_entry_point_size = 15;
+ memcpy(smbios_entry_point, buf,
+ smbios_entry_point_size);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+ }
+ dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+ pr_info("DMI: %s\n", dmi_ids_string);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
+ * 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
+ */
+static int __init dmi_smbios3_present(const u8 *buf)
+{
+ if (memcmp(buf, "_SM3_", 5) == 0 &&
+ buf[6] >= 24 && buf[6] <= 32 &&
+ dmi_checksum(buf, buf[6])) {
+ dmi_ver = get_unaligned_be24(buf + 7);
+ dmi_num = 0; /* No longer specified */
+ dmi_len = get_unaligned_le32(buf + 12);
+ dmi_base = get_unaligned_le64(buf + 16);
+ smbios_entry_point_size = buf[6];
+ memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+ if (dmi_walk_early(dmi_decode) == 0) {
+ pr_info("SMBIOS %d.%d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
+ dmi_ver & 0xFF);
+ dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+ pr_info("DMI: %s\n", dmi_ids_string);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void __init dmi_scan_machine(void)
+{
+ char __iomem *p, *q;
+ char buf[32];
+
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ /*
+ * According to the DMTF SMBIOS reference spec v3.0.0, it is
+ * allowed to define both the 64-bit entry point (smbios3) and
+ * the 32-bit entry point (smbios), in which case they should
+ * either both point to the same SMBIOS structure table, or the
+ * table pointed to by the 64-bit entry point should contain a
+ * superset of the table contents pointed to by the 32-bit entry
+ * point (section 5.2)
+ * This implies that the 64-bit entry point should have
+ * precedence if it is defined and supported by the OS. If we
+ * have the 64-bit entry point, but fail to decode it, fall
+ * back to the legacy one (if available)
+ */
+ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
+ p = dmi_early_remap(efi.smbios3, 32);
+ if (p == NULL)
+ goto error;
+ memcpy_fromio(buf, p, 32);
+ dmi_early_unmap(p, 32);
+
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ return;
+ }
+ }
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto error;
+
+ /* This is called as a core_initcall() because it isn't
+ * needed during early boot. This also means we can
+ * iounmap the space when we're done with it.
+ */
+ p = dmi_early_remap(efi.smbios, 32);
+ if (p == NULL)
+ goto error;
+ memcpy_fromio(buf, p, 32);
+ dmi_early_unmap(p, 32);
+
+ if (!dmi_present(buf)) {
+ dmi_available = 1;
+ return;
+ }
+ } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
+ p = dmi_early_remap(SMBIOS_ENTRY_POINT_SCAN_START, 0x10000);
+ if (p == NULL)
+ goto error;
+
+ /*
+ * Same logic as above, look for a 64-bit entry point
+ * first, and if not found, fall back to 32-bit entry point.
+ */
+ memcpy_fromio(buf, p, 16);
+ for (q = p + 16; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ return;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+
+ /*
+ * Iterate over all possible DMI header addresses q.
+ * Maintain the 32 bytes around q in buf. On the
+ * first iteration, substitute zero for the
+ * out-of-range bytes so there is no chance of falsely
+ * detecting an SMBIOS header.
+ */
+ memset(buf, 0, 16);
+ for (q = p; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ return;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+ dmi_early_unmap(p, 0x10000);
+ }
+ error:
+ pr_info("DMI not present or invalid.\n");
+}
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ memcpy(buf, attr->private + pos, count);
+ return count;
+}
+
+static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
+static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
+
+static int __init dmi_init(void)
+{
+ struct kobject *tables_kobj;
+ u8 *dmi_table;
+ int ret = -ENOMEM;
+
+ if (!dmi_available)
+ return 0;
+
+ /*
+ * Set up dmi directory at /sys/firmware/dmi. This entry should stay
+ * even after a later error, as it can be used by other modules like
+ * dmi-sysfs.
+ */
+ dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
+ if (!dmi_kobj)
+ goto err;
+
+ tables_kobj = kobject_create_and_add("tables", dmi_kobj);
+ if (!tables_kobj)
+ goto err;
+
+ dmi_table = dmi_remap(dmi_base, dmi_len);
+ if (!dmi_table)
+ goto err_tables;
+
+ bin_attr_smbios_entry_point.size = smbios_entry_point_size;
+ bin_attr_smbios_entry_point.private = smbios_entry_point;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
+ if (ret)
+ goto err_unmap;
+
+ bin_attr_DMI.size = dmi_len;
+ bin_attr_DMI.private = dmi_table;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
+ if (!ret)
+ return 0;
+
+ sysfs_remove_bin_file(tables_kobj,
+ &bin_attr_smbios_entry_point);
+ err_unmap:
+ dmi_unmap(dmi_table);
+ err_tables:
+ kobject_del(tables_kobj);
+ kobject_put(tables_kobj);
+ err:
+ pr_err("dmi: Firmware registration failed.\n");
+
+ return ret;
+}
+subsys_initcall(dmi_init);
+
+/**
+ * dmi_setup - scan and setup DMI system information
+ *
+ * Scan the DMI system information. This sets up DMI identifiers
+ * (dmi_system_id) for printing them out on task dumps and prepares
+ * DIMM entry information (dmi_memdev_info) from the SMBIOS table
+ * for use when reporting memory errors.
+ */
+void __init dmi_setup(void)
+{
+ dmi_scan_machine();
+ if (!dmi_available)
+ return;
+
+ dmi_memdev_walk();
+ dump_stack_set_arch_desc("%s", dmi_ids_string);
+}
+
+/**
+ * dmi_matches - check if dmi_system_id structure matches system DMI data
+ * @dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_matches(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
+ int s = dmi->matches[i].slot;
+ if (s == DMI_NONE)
+ break;
+ if (s == DMI_OEM_STRING) {
+ /* DMI_OEM_STRING must be exact match */
+ const struct dmi_device *valid;
+
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+ dmi->matches[i].substr, NULL);
+ if (valid)
+ continue;
+ } else if (dmi_ident[s]) {
+ if (dmi->matches[i].exact_match) {
+ if (!strcmp(dmi_ident[s],
+ dmi->matches[i].substr))
+ continue;
+ } else {
+ if (strstr(dmi_ident[s],
+ dmi->matches[i].substr))
+ continue;
+ }
+ }
+
+ /* No match */
+ return false;
+ }
+ return true;
+}
+
+/**
+ * dmi_is_end_of_table - check for end-of-table marker
+ * @dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
+{
+ return dmi->matches[0].slot == DMI_NONE;
+}
+
+/**
+ * dmi_check_system - check system DMI data
+ * @list: array of dmi_system_id structures to match against
+ * All non-null elements of the list must match
+ * their slot's (field index's) data (i.e., each
+ * list string must be a substring of the specified
+ * DMI slot's string data) to be considered a
+ * successful match.
+ *
+ * Walk the blacklist table running matching functions until someone
+ * returns non-zero or we hit the end. The callback function is called for
+ * each successful match. Returns the number of matches.
+ *
+ * dmi_setup must be called before this function is called.
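+ *
+ * A minimal usage sketch (the table, ident and callback names below
+ * are hypothetical, not taken from this file):
+ *
+ *	static int __init my_quirk(const struct dmi_system_id *d)
+ *	{
+ *		pr_info("applying quirk for %s\n", d->ident);
+ *		return 0;	/* keep walking the table */
+ *	}
+ *
+ *	static const struct dmi_system_id my_table[] __initconst = {
+ *		{
+ *			.callback = my_quirk,
+ *			.ident = "Example Board",
+ *			.matches = {
+ *				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
+ *				DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
+ *			},
+ *		},
+ *		{ }	/* terminating entry */
+ *	};
+ *
+ *	dmi_check_system(my_table);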
+ */
+int dmi_check_system(const struct dmi_system_id *list)
+{
+ int count = 0;
+ const struct dmi_system_id *d;
+
+ for (d = list; !dmi_is_end_of_table(d); d++)
+ if (dmi_matches(d)) {
+ count++;
+ if (d->callback && d->callback(d))
+ break;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(dmi_check_system);
+
+/**
+ * dmi_first_match - find dmi_system_id structure matching system DMI data
+ * @list: array of dmi_system_id structures to match against
+ * All non-null elements of the list must match
+ * their slot's (field index's) data (i.e., each
+ * list string must be a substring of the specified
+ * DMI slot's string data) to be considered a
+ * successful match.
+ *
+ * Walk the blacklist table until the first match is found. Return the
+ * pointer to the matching entry or NULL if there's no match.
+ *
+ * dmi_setup must be called before this function is called.
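+ *
+ * A usage sketch (my_table and apply_quirk are hypothetical names):
+ *
+ *	const struct dmi_system_id *d = dmi_first_match(my_table);
+ *
+ *	if (d)
+ *		apply_quirk(d->driver_data);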
+ */
+const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
+{
+ const struct dmi_system_id *d;
+
+ for (d = list; !dmi_is_end_of_table(d); d++)
+ if (dmi_matches(d))
+ return d;
+
+ return NULL;
+}
+EXPORT_SYMBOL(dmi_first_match);
+
+/**
+ * dmi_get_system_info - return DMI data value
+ * @field: data index (see enum dmi_field)
+ *
+ * Returns one DMI data value, can be used to perform
+ * complex DMI data checks.
+ */
+const char *dmi_get_system_info(int field)
+{
+ return dmi_ident[field];
+}
+EXPORT_SYMBOL(dmi_get_system_info);
+
+/**
+ * dmi_name_in_serial - Check if string is in the DMI product serial information
+ * @str: string to check for
+ */
+int dmi_name_in_serial(const char *str)
+{
+ int f = DMI_PRODUCT_SERIAL;
+ if (dmi_ident[f] && strstr(dmi_ident[f], str))
+ return 1;
+ return 0;
+}
+
+/**
+ * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
+ * @str: Case sensitive Name
+ */
+int dmi_name_in_vendors(const char *str)
+{
+ static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
+ int i;
+ for (i = 0; fields[i] != DMI_NONE; i++) {
+ int f = fields[i];
+ if (dmi_ident[f] && strstr(dmi_ident[f], str))
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dmi_name_in_vendors);
+
+/**
+ * dmi_find_device - find onboard device by type/name
+ * @type: device type or %DMI_DEV_TYPE_ANY to match all device types
+ * @name: device name string or %NULL to match all
+ * @from: previous device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known onboard devices. If a device is
+ * found with a matching @type and @name, a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * If @from is not %NULL, the search continues from the next device.
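+ *
+ * An iteration sketch (illustrative only):
+ *
+ *	const struct dmi_device *dev = NULL;
+ *
+ *	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
+ *		pr_info("IPMI device: %s\n", dev->name);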
+ */
+const struct dmi_device *dmi_find_device(int type, const char *name,
+ const struct dmi_device *from)
+{
+ const struct list_head *head = from ? &from->list : &dmi_devices;
+ struct list_head *d;
+
+ for (d = head->next; d != &dmi_devices; d = d->next) {
+ const struct dmi_device *dev =
+ list_entry(d, struct dmi_device, list);
+
+ if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
+ ((name == NULL) || (strcmp(dev->name, name) == 0)))
+ return dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dmi_find_device);
+
+/**
+ * dmi_get_date - parse a DMI date
+ * @field: data index (see enum dmi_field)
+ * @yearp: optional out parameter for the year
+ * @monthp: optional out parameter for the month
+ * @dayp: optional out parameter for the day
+ *
+ * The date field is assumed to be in the form resembling
+ * [mm[/dd]]/yy[yy] and the result is stored in the out
+ * parameters, any or all of which can be omitted.
+ *
+ * If the field doesn't exist, all out parameters are set to zero
+ * and false is returned. Otherwise, true is returned with any
+ * invalid part of date set to zero.
+ *
+ * On return, year, month and day are guaranteed to be in the
+ * range of [0,9999], [0,12] and [0,31] respectively.
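+ *
+ * For example, a BIOS date string of "03/25/2019" yields year 2019,
+ * month 3 and day 25, while a bare "2019" (no '/') leaves all three
+ * outputs at zero even though true is returned.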
+ */
+bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+ int year = 0, month = 0, day = 0;
+ bool exists;
+ const char *s, *y;
+ char *e;
+
+ s = dmi_get_system_info(field);
+ exists = s;
+ if (!exists)
+ goto out;
+
+ /*
+ * Determine year first. We assume the date string resembles
+ * mm/dd/yy[yy] but the original code extracted only the year
+ * from the end. Keep the behavior in the spirit of no
+ * surprises.
+ */
+ y = strrchr(s, '/');
+ if (!y)
+ goto out;
+
+ y++;
+ year = simple_strtoul(y, &e, 10);
+ if (y != e && year < 100) { /* 2-digit year */
+ year += 1900;
+ if (year < 1996) /* no dates < spec 1.0 */
+ year += 100;
+ }
+ if (year > 9999) /* year should fit in %04d */
+ year = 0;
+
+ /* parse the mm and dd */
+ month = simple_strtoul(s, &e, 10);
+ if (s == e || *e != '/' || !month || month > 12) {
+ month = 0;
+ goto out;
+ }
+
+ s = e + 1;
+ day = simple_strtoul(s, &e, 10);
+ if (s == y || s == e || *e != '/' || day > 31)
+ day = 0;
+out:
+ if (yearp)
+ *yearp = year;
+ if (monthp)
+ *monthp = month;
+ if (dayp)
+ *dayp = day;
+ return exists;
+}
+EXPORT_SYMBOL(dmi_get_date);
+
+/**
+ * dmi_get_bios_year - get a year out of DMI_BIOS_DATE field
+ *
+ * Returns year on success, -ENXIO if DMI is not selected,
+ * or a different negative error code if DMI field is not present
+ * or not parseable.
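+ *
+ * A typical (illustrative) use is gating a quirk on firmware age,
+ * e.g. "if (dmi_get_bios_year() >= 2015) ...".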
+ */
+int dmi_get_bios_year(void)
+{
+ bool exists;
+ int year;
+
+ exists = dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
+ if (!exists)
+ return -ENODATA;
+
+ return year ? year : -ERANGE;
+}
+EXPORT_SYMBOL(dmi_get_bios_year);
+
+/**
+ * dmi_walk - Walk the DMI table and get called back for every record
+ * @decode: Callback function
+ * @private_data: Private data to be passed to the callback function
+ *
+ * Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ * or a different negative error code if DMI walking fails.
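+ *
+ * A minimal usage sketch (the callback name is hypothetical):
+ *
+ *	static void my_decode(const struct dmi_header *dh, void *priv)
+ *	{
+ *		if (dh->type == DMI_ENTRY_MEM_DEVICE)
+ *			(*(int *)priv)++;
+ *	}
+ *
+ *	int count = 0;
+ *
+ *	if (!dmi_walk(my_decode, &count))
+ *		pr_info("%d memory devices\n", count);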
+ */
+int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data)
+{
+ u8 *buf;
+
+ if (!dmi_available)
+ return -ENXIO;
+
+ buf = dmi_remap(dmi_base, dmi_len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ dmi_decode_table(buf, decode, private_data);
+
+ dmi_unmap(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dmi_walk);
+
+/**
+ * dmi_match - compare a string to the dmi field (if exists)
+ * @f: DMI field identifier
+ * @str: string to compare the DMI field to
+ *
+ * Returns true if the requested field equals the str (including NULL).
+ */
+bool dmi_match(enum dmi_field f, const char *str)
+{
+ const char *info = dmi_get_system_info(f);
+
+ if (info == NULL || str == NULL)
+ return info == str;
+
+ return !strcmp(info, str);
+}
+EXPORT_SYMBOL_GPL(dmi_match);
+
+void dmi_memdev_name(u16 handle, const char **bank, const char **device)
+{
+ int n;
+
+ if (dmi_memdev == NULL)
+ return;
+
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle) {
+ *bank = dmi_memdev[n].bank;
+ *device = dmi_memdev[n].device;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_name);
+
+u64 dmi_memdev_size(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].size;
+ }
+ }
+ return ~0ull;
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_size);
+
+/**
+ * dmi_memdev_type - get the memory type
+ * @handle: DMI structure handle
+ *
+ * Return the DMI memory type of the module in the slot associated with the
+ * given DMI handle, or 0x0 if no such DMI handle exists.
+ */
+u8 dmi_memdev_type(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].type;
+ }
+ }
+ return 0x0; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_type);
+
+/**
+ * dmi_memdev_handle - get the DMI handle of a memory slot
+ * @slot: slot number
+ *
+ * Return the DMI handle associated with a given memory slot, or %0xFFFF
+ * if there is no such slot.
+ */
+u16 dmi_memdev_handle(int slot)
+{
+ if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
+ return dmi_memdev[slot].handle;
+
+ return 0xffff; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_handle);
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
new file mode 100644
index 0000000000..55dec4eb2c
--- /dev/null
+++ b/drivers/firmware/edd.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/firmware/edd.c
+ * Copyright (C) 2002, 2003, 2004 Dell Inc.
+ * by Matt Domsch <Matt_Domsch@dell.com>
+ * disk signature by Matt Domsch, Andrew Wilks, and Sandeep K. Shandilya
+ * legacy CHS by Patrick J. LoPresti <patl@users.sourceforge.net>
+ *
+ * BIOS Enhanced Disk Drive Services (EDD)
+ * conformant to T13 Committee www.t13.org
+ * projects 1572D, 1484D, 1386D, 1226DT
+ *
+ * This code takes information provided by BIOS EDD calls
+ * fn41 - Check Extensions Present and
+ * fn48 - Get Device Parameters with EDD extensions
+ * made in setup.S, copied to safe structures in setup.c,
+ * and presents it in sysfs.
+ *
+ * Please see http://linux.dell.com/edd/results.html for
+ * the list of BIOSes which have been reported to implement EDD.
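+ *
+ * The resulting sysfs layout looks roughly like the following
+ * (illustrative; device numbers and attributes depend on the BIOS):
+ *
+ *   /sys/firmware/edd/int13_dev80/mbr_signature
+ *   /sys/firmware/edd/int13_dev80/sectors
+ *   /sys/firmware/edd/int13_dev81/...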
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/edd.h>
+
+#define EDD_VERSION "0.16"
+#define EDD_DATE "2004-Jun-25"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to BIOS EDD information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EDD_VERSION);
+
+#define left (PAGE_SIZE - (p - buf) - 1)
+
+struct edd_device {
+ unsigned int index;
+ unsigned int mbr_signature;
+ struct edd_info *info;
+ struct kobject kobj;
+};
+
+struct edd_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct edd_device *edev, char *buf);
+ int (*test)(struct edd_device *edev);
+};
+
+/* forward declarations */
+static int edd_dev_is_type(struct edd_device *edev, const char *type);
+static struct pci_dev *edd_get_pci_dev(struct edd_device *edev);
+
+static struct edd_device *edd_devices[EDD_MBR_SIG_MAX];
+
+#define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \
+struct edd_attribute edd_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .test = _test, \
+};
+
+static int
+edd_has_mbr_signature(struct edd_device *edev)
+{
+ return edev->index < min_t(unsigned char, edd.mbr_signature_nr, EDD_MBR_SIG_MAX);
+}
+
+static int
+edd_has_edd_info(struct edd_device *edev)
+{
+ return edev->index < min_t(unsigned char, edd.edd_info_nr, EDDMAXNR);
+}
+
+static inline struct edd_info *
+edd_dev_get_info(struct edd_device *edev)
+{
+ return edev->info;
+}
+
+static inline void
+edd_dev_set_info(struct edd_device *edev, int i)
+{
+ edev->index = i;
+ if (edd_has_mbr_signature(edev))
+ edev->mbr_signature = edd.mbr_signature[i];
+ if (edd_has_edd_info(edev))
+ edev->info = &edd.edd_info[i];
+}
+
+#define to_edd_attr(_attr) container_of(_attr,struct edd_attribute,attr)
+#define to_edd_device(obj) container_of(obj,struct edd_device,kobj)
+
+static ssize_t
+edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
+{
+ struct edd_device *dev = to_edd_device(kobj);
+ struct edd_attribute *edd_attr = to_edd_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (edd_attr->show)
+ ret = edd_attr->show(dev, buf);
+ return ret;
+}
+
+static const struct sysfs_ops edd_attr_ops = {
+ .show = edd_attr_show,
+};
+
+static ssize_t
+edd_show_host_bus(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ int i;
+
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ for (i = 0; i < 4; i++) {
+ if (isprint(info->params.host_bus_type[i])) {
+ p += scnprintf(p, left, "%c", info->params.host_bus_type[i]);
+ } else {
+ p += scnprintf(p, left, " ");
+ }
+ }
+
+ if (!strncmp(info->params.host_bus_type, "ISA", 3)) {
+ p += scnprintf(p, left, "\tbase_address: %x\n",
+ info->params.interface_path.isa.base_address);
+ } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
+ !strncmp(info->params.host_bus_type, "PCI", 3) ||
+ !strncmp(info->params.host_bus_type, "XPRS", 4)) {
+ p += scnprintf(p, left,
+ "\t%02x:%02x.%d channel: %u\n",
+ info->params.interface_path.pci.bus,
+ info->params.interface_path.pci.slot,
+ info->params.interface_path.pci.function,
+ info->params.interface_path.pci.channel);
+ } else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
+ !strncmp(info->params.host_bus_type, "HTPT", 4)) {
+ p += scnprintf(p, left,
+ "\tTBD: %llx\n",
+ info->params.interface_path.ibnd.reserved);
+
+ } else {
+ p += scnprintf(p, left, "\tunknown: %llx\n",
+ info->params.interface_path.unknown.reserved);
+ }
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_interface(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ int i;
+
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ for (i = 0; i < 8; i++) {
+ if (isprint(info->params.interface_type[i])) {
+ p += scnprintf(p, left, "%c", info->params.interface_type[i]);
+ } else {
+ p += scnprintf(p, left, " ");
+ }
+ }
+ if (!strncmp(info->params.interface_type, "ATAPI", 5)) {
+ p += scnprintf(p, left, "\tdevice: %u lun: %u\n",
+ info->params.device_path.atapi.device,
+ info->params.device_path.atapi.lun);
+ } else if (!strncmp(info->params.interface_type, "ATA", 3)) {
+ p += scnprintf(p, left, "\tdevice: %u\n",
+ info->params.device_path.ata.device);
+ } else if (!strncmp(info->params.interface_type, "SCSI", 4)) {
+ p += scnprintf(p, left, "\tid: %u lun: %llu\n",
+ info->params.device_path.scsi.id,
+ info->params.device_path.scsi.lun);
+ } else if (!strncmp(info->params.interface_type, "USB", 3)) {
+ p += scnprintf(p, left, "\tserial_number: %llx\n",
+ info->params.device_path.usb.serial_number);
+ } else if (!strncmp(info->params.interface_type, "1394", 4)) {
+ p += scnprintf(p, left, "\teui: %llx\n",
+ info->params.device_path.i1394.eui);
+ } else if (!strncmp(info->params.interface_type, "FIBRE", 5)) {
+ p += scnprintf(p, left, "\twwid: %llx lun: %llx\n",
+ info->params.device_path.fibre.wwid,
+ info->params.device_path.fibre.lun);
+ } else if (!strncmp(info->params.interface_type, "I2O", 3)) {
+ p += scnprintf(p, left, "\tidentity_tag: %llx\n",
+ info->params.device_path.i2o.identity_tag);
+ } else if (!strncmp(info->params.interface_type, "RAID", 4)) {
+ p += scnprintf(p, left, "\tidentity_tag: %x\n",
+ info->params.device_path.raid.array_number);
+ } else if (!strncmp(info->params.interface_type, "SATA", 4)) {
+ p += scnprintf(p, left, "\tdevice: %u\n",
+ info->params.device_path.sata.device);
+ } else {
+ p += scnprintf(p, left, "\tunknown: %llx %llx\n",
+ info->params.device_path.unknown.reserved1,
+ info->params.device_path.unknown.reserved2);
+ }
+
+ return (p - buf);
+}
+
+/**
+ * edd_show_raw_data() - copies raw data to buffer for userspace to parse
+ * @edev: target edd_device
+ * @buf: output buffer
+ *
+ * Returns: number of bytes written, or -EINVAL on failure
+ */
+static ssize_t
+edd_show_raw_data(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ ssize_t len = sizeof (info->params);
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE))
+ len = info->params.length;
+
+ /* In case of buggy BIOSs */
+ if (len > (sizeof(info->params)))
+ len = sizeof(info->params);
+
+ memcpy(buf, &info->params, len);
+ return len;
+}
+
+static ssize_t
+edd_show_version(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "0x%02x\n", info->version);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_mbr_signature(struct edd_device *edev, char *buf)
+{
+ char *p = buf;
+ p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_extensions(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) {
+ p += scnprintf(p, left, "Fixed disk access\n");
+ }
+ if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) {
+ p += scnprintf(p, left, "Device locking and ejecting\n");
+ }
+ if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) {
+ p += scnprintf(p, left, "Enhanced Disk Drive support\n");
+ }
+ if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) {
+ p += scnprintf(p, left, "64-bit extensions\n");
+ }
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_info_flags(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (info->params.info_flags & EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT)
+ p += scnprintf(p, left, "DMA boundary error transparent\n");
+ if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID)
+ p += scnprintf(p, left, "geometry valid\n");
+ if (info->params.info_flags & EDD_INFO_REMOVABLE)
+ p += scnprintf(p, left, "removable\n");
+ if (info->params.info_flags & EDD_INFO_WRITE_VERIFY)
+ p += scnprintf(p, left, "write verify\n");
+ if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION)
+ p += scnprintf(p, left, "media change notification\n");
+ if (info->params.info_flags & EDD_INFO_LOCKABLE)
+ p += scnprintf(p, left, "lockable\n");
+ if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT)
+ p += scnprintf(p, left, "no media present\n");
+ if (info->params.info_flags & EDD_INFO_USE_INT13_FN50)
+ p += scnprintf(p, left, "use int13 fn50\n");
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_max_cylinder);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_head(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_max_head);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_sectors_per_track);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_cylinders(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_heads(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.num_default_heads);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_sectors_per_track(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.sectors_per_track);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_sectors(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors);
+ return (p - buf);
+}
+
+
+/*
+ * Some device instances may not have all the above attributes,
+ * or the attribute values may be meaningless (i.e. if
+ * the device is < EDD 3.0, it won't have host_bus and interface
+ * information), so don't bother making files for them. Likewise
+ * if the default_{cylinders,heads,sectors_per_track} values
+ * are zero, the BIOS doesn't provide sane values, so don't bother
+ * creating files for them either.
+ */
+
+static int
+edd_has_legacy_max_cylinder(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_max_cylinder > 0;
+}
+
+static int
+edd_has_legacy_max_head(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_max_head > 0;
+}
+
+static int
+edd_has_legacy_sectors_per_track(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_sectors_per_track > 0;
+}
+
+static int
+edd_has_default_cylinders(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.num_default_cylinders > 0;
+}
+
+static int
+edd_has_default_heads(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.num_default_heads > 0;
+}
+
+static int
+edd_has_default_sectors_per_track(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.sectors_per_track > 0;
+}
+
+static int
+edd_has_edd30(struct edd_device *edev)
+{
+ struct edd_info *info;
+ int i;
+ u8 csum = 0;
+
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+
+ if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
+ return 0;
+ }
+
+
+ /* We support only T13 spec */
+ if (info->params.device_path_info_length != 44)
+ return 0;
+
+ for (i = 30; i < info->params.device_path_info_length + 30; i++)
+ csum += *(((u8 *)&info->params) + i);
+
+ if (csum)
+ return 0;
+
+ return 1;
+}
+
+
+static EDD_DEVICE_ATTR(raw_data, 0444, edd_show_raw_data, edd_has_edd_info);
+static EDD_DEVICE_ATTR(version, 0444, edd_show_version, edd_has_edd_info);
+static EDD_DEVICE_ATTR(extensions, 0444, edd_show_extensions, edd_has_edd_info);
+static EDD_DEVICE_ATTR(info_flags, 0444, edd_show_info_flags, edd_has_edd_info);
+static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info);
+static EDD_DEVICE_ATTR(legacy_max_cylinder, 0444,
+ edd_show_legacy_max_cylinder,
+ edd_has_legacy_max_cylinder);
+static EDD_DEVICE_ATTR(legacy_max_head, 0444, edd_show_legacy_max_head,
+ edd_has_legacy_max_head);
+static EDD_DEVICE_ATTR(legacy_sectors_per_track, 0444,
+ edd_show_legacy_sectors_per_track,
+ edd_has_legacy_sectors_per_track);
+static EDD_DEVICE_ATTR(default_cylinders, 0444, edd_show_default_cylinders,
+ edd_has_default_cylinders);
+static EDD_DEVICE_ATTR(default_heads, 0444, edd_show_default_heads,
+ edd_has_default_heads);
+static EDD_DEVICE_ATTR(default_sectors_per_track, 0444,
+ edd_show_default_sectors_per_track,
+ edd_has_default_sectors_per_track);
+static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
+static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
+static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
+
+/* These attributes are conditional and only added for some devices. */
+static struct edd_attribute * edd_attrs[] = {
+ &edd_attr_raw_data,
+ &edd_attr_version,
+ &edd_attr_extensions,
+ &edd_attr_info_flags,
+ &edd_attr_sectors,
+ &edd_attr_legacy_max_cylinder,
+ &edd_attr_legacy_max_head,
+ &edd_attr_legacy_sectors_per_track,
+ &edd_attr_default_cylinders,
+ &edd_attr_default_heads,
+ &edd_attr_default_sectors_per_track,
+ &edd_attr_interface,
+ &edd_attr_host_bus,
+ &edd_attr_mbr_signature,
+ NULL,
+};
+
+/**
+ * edd_release - free edd structure
+ * @kobj: kobject of edd structure
+ *
+ * This is called when the refcount of the edd structure
+ * reaches 0. This should happen right after we unregister,
+ * but just in case, we use the release callback anyway.
+ */
+
+static void edd_release(struct kobject * kobj)
+{
+ struct edd_device * dev = to_edd_device(kobj);
+ kfree(dev);
+}
+
+static const struct kobj_type edd_ktype = {
+ .release = edd_release,
+ .sysfs_ops = &edd_attr_ops,
+};
+
+static struct kset *edd_kset;
+
+
+/**
+ * edd_dev_is_type() - is this EDD device a 'type' device?
+ * @edev: target edd_device
+ * @type: a host bus or interface identifier string per the EDD spec
+ *
+ * Returns 1 (TRUE) if it is a 'type' device, 0 otherwise.
+ */
+static int
+edd_dev_is_type(struct edd_device *edev, const char *type)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+
+ if (type && info) {
+ if (!strncmp(info->params.host_bus_type, type, strlen(type)) ||
+ !strncmp(info->params.interface_type, type, strlen(type)))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * edd_get_pci_dev() - finds pci_dev that matches edev
+ * @edev: edd_device
+ *
+ * Returns pci_dev if found, or NULL
+ */
+static struct pci_dev *
+edd_get_pci_dev(struct edd_device *edev)
+{
+ struct edd_info *info = edd_dev_get_info(edev);
+
+ if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
+ return pci_get_domain_bus_and_slot(0,
+ info->params.interface_path.pci.bus,
+ PCI_DEVFN(info->params.interface_path.pci.slot,
+ info->params.interface_path.pci.function));
+ }
+ return NULL;
+}
+
+static int
+edd_create_symlink_to_pcidev(struct edd_device *edev)
+{
+ struct pci_dev *pci_dev = edd_get_pci_dev(edev);
+ int ret;
+
+ if (!pci_dev)
+ return 1;
+ ret = sysfs_create_link(&edev->kobj, &pci_dev->dev.kobj, "pci_dev");
+ pci_dev_put(pci_dev);
+ return ret;
+}
+
+static inline void
+edd_device_unregister(struct edd_device *edev)
+{
+ kobject_put(&edev->kobj);
+}
+
+static void edd_populate_dir(struct edd_device *edev)
+{
+ struct edd_attribute *attr;
+ int error = 0;
+ int i;
+
+ for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
+ if (!attr->test || attr->test(edev))
+ error = sysfs_create_file(&edev->kobj, &attr->attr);
+ }
+
+ if (!error) {
+ edd_create_symlink_to_pcidev(edev);
+ }
+}
+
+static int
+edd_device_register(struct edd_device *edev, int i)
+{
+ int error;
+
+ if (!edev)
+ return 1;
+ edd_dev_set_info(edev, i);
+ edev->kobj.kset = edd_kset;
+ error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
+ "int13_dev%02x", 0x80 + i);
+ if (!error) {
+ edd_populate_dir(edev);
+ kobject_uevent(&edev->kobj, KOBJ_ADD);
+ }
+ return error;
+}
+
+static inline int edd_num_devices(void)
+{
+ return max_t(unsigned char,
+ min_t(unsigned char, EDD_MBR_SIG_MAX, edd.mbr_signature_nr),
+ min_t(unsigned char, EDDMAXNR, edd.edd_info_nr));
+}
+
+/**
+ * edd_init() - creates sysfs tree of EDD data
+ */
+static int __init
+edd_init(void)
+{
+ int i;
+ int rc = 0;
+ struct edd_device *edev;
+
+ if (!edd_num_devices())
+ return -ENODEV;
+
+ printk(KERN_INFO "BIOS EDD facility v%s %s, %d devices found\n",
+ EDD_VERSION, EDD_DATE, edd_num_devices());
+
+ edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
+ if (!edd_kset)
+ return -ENOMEM;
+
+ for (i = 0; i < edd_num_devices(); i++) {
+ edev = kzalloc(sizeof (*edev), GFP_KERNEL);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = edd_device_register(edev, i);
+ if (rc) {
+ kfree(edev);
+ goto out;
+ }
+ edd_devices[i] = edev;
+ }
+
+ return 0;
+
+out:
+ while (--i >= 0)
+ edd_device_unregister(edd_devices[i]);
+ kset_unregister(edd_kset);
+ return rc;
+}
+
+static void __exit
+edd_exit(void)
+{
+ int i;
+ struct edd_device *edev;
+
+ for (i = 0; i < edd_num_devices(); i++) {
+ if ((edev = edd_devices[i]))
+ edd_device_unregister(edev);
+ }
+ kset_unregister(edd_kset);
+}
+
+late_initcall(edd_init);
+module_exit(edd_exit);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
new file mode 100644
index 0000000000..231f1c70d1
--- /dev/null
+++ b/drivers/firmware/efi/Kconfig
@@ -0,0 +1,303 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "EFI (Extensible Firmware Interface) Support"
+ depends on EFI
+
+config EFI_ESRT
+ bool
+ depends on EFI && !IA64
+ default y
+
+config EFI_VARS_PSTORE
+ tristate "Register efivars backend for pstore"
+ depends on PSTORE
+ select UCS2_STRING
+ default y
+ help
+ Say Y here to enable the use of efivars as a backend to pstore. This
+ will allow writing console messages, crash dumps, or anything
+ else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+ bool "Disable using efivars as a pstore backend by default"
+ depends on EFI_VARS_PSTORE
+ default n
+ help
+ Saying Y here will disable the use of efivars as a storage
+ backend for pstore by default. This setting can be overridden
+ using the efivars module's pstore_disable parameter.
+
+config EFI_SOFT_RESERVE
+ bool "Reserve EFI Specific Purpose Memory"
+ depends on EFI && EFI_STUB && ACPI_HMAT
+ default ACPI_HMAT
+ help
+ On systems that have mixed performance classes of memory, EFI
+ may indicate specific purpose memory with an attribute (see
+ EFI_MEMORY_SP in UEFI 2.8). A memory range tagged with this
+ attribute may have unique performance characteristics compared
+ to the system's general purpose "System RAM" pool. On the
+ expectation that such memory has application-specific usage,
+ and that its base EFI memory type is "conventional", answer Y to
+ arrange for the kernel to reserve it as a "Soft Reserved"
+ resource, and set aside for direct-access (device-dax) by
+ default. The memory range can later be optionally assigned to
+ the page allocator by system administrator policy via the
+ device-dax kmem facility. Say N to have the kernel treat this
+ memory as "System RAM" by default.
+
+ If unsure, say Y.
+
+config EFI_DXE_MEM_ATTRIBUTES
+ bool "Adjust memory attributes in EFISTUB"
+ depends on EFI && EFI_STUB && X86
+ default y
+ help
+ The UEFI specification does not guarantee that all memory is
+ accessible for both write and execute, as the kernel expects
+ it to be.
+ Use DXE services to check and alter memory protection
+ attributes during boot via EFISTUB to ensure that memory
+ ranges used by the kernel are writable and executable.
+
+config EFI_PARAMS_FROM_FDT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the EFI runtime support gets system table address, memory
+ map address, and other parameters from the device tree.
+
+config EFI_RUNTIME_WRAPPERS
+ bool
+
+config EFI_GENERIC_STUB
+ bool
+
+config EFI_ZBOOT
+ bool "Enable the generic EFI decompressor"
+ depends on EFI_GENERIC_STUB && !ARM
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
+ help
+ Create the bootable image as an EFI application that carries the
+ actual kernel image in compressed form, and decompresses it into
+ memory before executing it via LoadImage/StartImage EFI boot service
+ calls. For compatibility with non-EFI loaders, the payload can be
+ decompressed and executed by the loader as well, provided that the
+ loader implements the decompression algorithm and that non-EFI boot
+ is supported by the encapsulated image. (The compression algorithm
+ used is described in the zboot image header)
+
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
+ default y
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+ If the device tree is provided by the platform or by
+ the bootloader, this option may not be needed.
+ But for various development reasons, and to maintain existing
+ functionality for bootloaders that do not have such support,
+ this option is necessary.
+
+config EFI_BOOTLOADER_CONTROL
+ tristate "EFI Bootloader Control"
+ select UCS2_STRING
+ default n
+ help
+ This module installs a reboot hook, such that if reboot() is
+ invoked with a string argument NNN, "NNN" is copied to the
+ "LoaderEntryOneShot" EFI variable, to be read by the
+ bootloader. If the string matches one of the boot labels
+ defined in its configuration, the bootloader will boot once
+ to that label. The "LoaderEntryRebootReason" EFI variable is
+ set with the reboot reason: "reboot" or "shutdown". The
+ bootloader reads this reboot reason and takes particular
+ action according to its policy.
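+
+ For example (assuming the bootloader defines such a label), invoking
+ reboot() with the string argument "bootloader" stores "bootloader" in
+ LoaderEntryOneShot so that the next boot enters that entry once.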
+
+config EFI_CAPSULE_LOADER
+ tristate "EFI capsule loader"
+ depends on EFI && !IA64
+ help
+ This option exposes a loader interface "/dev/efi_capsule_loader" for
+ users to load EFI capsules. This driver requires working runtime
+ capsule support in the firmware, which many OEMs do not provide.
+
+ Most users should say N.
+
+config EFI_CAPSULE_QUIRK_QUARK_CSH
+ bool "Add support for Quark capsules with non-standard headers"
+ depends on X86 && !64BIT
+ select EFI_CAPSULE_LOADER
+ default y
+ help
+ Add support for processing Quark X1000 EFI capsules, whose header
+ layout deviates from the layout mandated by the UEFI specification.
+
+config EFI_TEST
+ tristate "EFI Runtime Service Tests Support"
+ depends on EFI
+ default n
+ help
+ This driver uses the efi.<service> function pointers directly instead
+ of going through the efivar API, because it is not trying to test the
+ kernel subsystem, only the UEFI runtime service
+ interfaces provided by the firmware. This driver is used
+ by the Firmware Test Suite (FWTS) for testing the readiness of the
+ firmware's UEFI runtime interfaces.
+ Details for FWTS are available from:
+ <https://wiki.ubuntu.com/FirmwareTestSuite>
+
+ Say Y here to enable the runtime services support via /dev/efi_test.
+ If unsure, say N.
+
+config EFI_DEV_PATH_PARSER
+ bool
+
+config APPLE_PROPERTIES
+ bool "Apple Device Properties"
+ depends on EFI_STUB && X86
+ select EFI_DEV_PATH_PARSER
+ select UCS2_STRING
+ help
+ Retrieve properties from EFI on Apple Macs and assign them to
+ devices, allowing for improved support of Apple hardware.
+ Properties that would otherwise be missing include the
+ Thunderbolt Device ROM and GPU configuration data.
+
+ If unsure, say Y if you have a Mac. Otherwise N.
+
+config RESET_ATTACK_MITIGATION
+ bool "Reset memory attack mitigation"
+ depends on EFI_STUB
+ help
+ Request that the firmware clear the contents of RAM after a reboot
+ using the TCG Platform Reset Attack Mitigation specification. This
+ protects against an attacker forcibly rebooting the system while it
+ still contains secrets in RAM, booting another OS and extracting the
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
+
+config EFI_RCI2_TABLE
+ bool "EFI Runtime Configuration Interface Table Version 2 Support"
+ depends on X86 || COMPILE_TEST
+ help
+ Displays the content of the Runtime Configuration Interface
+ Table version 2 on Dell EMC PowerEdge systems as a binary
+ attribute 'rci2' under /sys/firmware/efi/tables directory.
+
+ The RCI2 table contains BIOS HII in XML format and is used to populate
+ the BIOS setup page in the Dell EMC OpenManage Server Administrator tool.
+ The BIOS setup page contains BIOS tokens which can be configured.
+
+ Say Y here for Dell EMC PowerEdge systems.
+
+config EFI_DISABLE_PCI_DMA
+ bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+ help
+ Disable the busmaster bit in the control register on all PCI bridges
+ while calling ExitBootServices() and passing control to the runtime
+ kernel. System firmware may configure the IOMMU to prevent malicious
+ PCI devices from being able to attack the OS via DMA. However, since
+ firmware can't guarantee that the OS is IOMMU-aware, it will tear
+ down IOMMU configuration when ExitBootServices() is called. This
+ leaves a window in which a hostile device could still cause
+ damage before Linux configures the IOMMU again.
+
+ If you say Y here, the EFI stub will clear the busmaster bit on all
+ PCI bridges before ExitBootServices() is called. This will prevent
+ any malicious PCI devices from being able to perform DMA until the
+ kernel reenables busmastering after configuring the IOMMU.
+
+ This option will cause failures with some poorly behaved hardware
+ and should not be enabled without testing. The kernel commandline
+ options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
+ may be used to override this option.
+
+config EFI_EARLYCON
+ def_bool y
+ depends on SERIAL_EARLYCON && !ARM && !IA64
+ select FONT_SUPPORT
+ select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+ bool "Load custom ACPI SSDT overlay from an EFI variable"
+ depends on ACPI
+ default ACPI_TABLE_UPGRADE
+ help
+ Allow loading of an ACPI SSDT overlay from an EFI variable specified
+ by a kernel command line option.
+
+ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+ information.
+
+config EFI_DISABLE_RUNTIME
+ bool "Disable EFI runtime services support by default"
+ default y if PREEMPT_RT
+ help
+ Allow disabling the EFI runtime services support by default. This can
+ already be achieved by using the efi=noruntime option, but it could be
+ useful to have this default without any kernel command line parameter.
+
+ The EFI runtime services are disabled by default when PREEMPT_RT is
+ enabled, because measurements have shown that some EFI function calls
+ might take too much time to complete, causing large latencies, which is
+ an issue for Real-Time kernels.
+
+ This default can be overridden by using the efi=runtime option.
+
+config EFI_COCO_SECRET
+ bool "EFI Confidential Computing Secret Area Support"
+ help
+ Confidential Computing platforms (such as AMD SEV) allow the
+ Guest Owner to securely inject secrets during guest VM launch.
+ The secrets are placed in a designated EFI reserved memory area.
+
+ In order to use the secrets in the kernel, the location of the secret
+ area (as published in the EFI config table) must be kept.
+
+ If you say Y here, the address of the EFI secret area will be kept
+ for usage inside the kernel. This will allow the
+ virt/coco/efi_secret module to access the secrets, which in turn
+ allows userspace programs to access the injected secrets.
+
+config UNACCEPTED_MEMORY
+ bool
+ depends on EFI_STUB
+ help
+ Some Virtual Machine platforms, such as Intel TDX, require
+ some memory to be "accepted" by the guest before it can be used.
+ This mechanism helps prevent malicious hosts from making changes
+ to guest memory.
+
+ UEFI specification v2.9 introduced the EFI_UNACCEPTED_MEMORY memory type.
+
+ This option adds support for unaccepted memory and makes such memory
+ usable by the kernel.
+
+config EFI_EMBEDDED_FIRMWARE
+ bool
+ select CRYPTO_LIB_SHA256
+
+endmenu
+
+config UEFI_CPER
+ bool
+
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && ( ARM || ARM64 )
+ default y
+
+config UEFI_CPER_X86
+ bool
+ depends on UEFI_CPER && X86
+ default y
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
new file mode 100644
index 0000000000..e489fefd23
--- /dev/null
+++ b/drivers/firmware/efi/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux kernel
+#
+
+#
+# ARM64 maps EFI runtime services in userspace addresses
+# which don't have a KASAN shadow, so dereferencing these addresses
+# in efi_call_virt() will cause a crash if this code is instrumented.
+#
+KASAN_SANITIZE_runtime-wrappers.o := n
+
+obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
+obj-$(CONFIG_EFI) += memmap.o
+ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
+obj-$(CONFIG_EFI) += capsule.o
+endif
+obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
+obj-$(CONFIG_EFI_ESRT) += esrt.o
+obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
+obj-$(CONFIG_UEFI_CPER) += cper.o cper_cxl.o
+obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
+subdir-$(CONFIG_EFI_STUB) += libstub
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
+obj-$(CONFIG_EFI_TEST) += test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
+obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
+obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+
+obj-$(CONFIG_SYSFB) += sysfb_efi.o
+
+arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o
+obj-$(CONFIG_ARM) += $(arm-obj-y)
+obj-$(CONFIG_ARM64) += $(arm-obj-y)
+riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o
+obj-$(CONFIG_RISCV) += $(riscv-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
+obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
+obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 0000000000..ea84108035
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/memblock.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+ dump_properties = true;
+ return 1;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+ u32 len;
+ u32 prop_count;
+ struct efi_dev_path path[];
+ /*
+ * followed by key/value pairs, each key and value preceded by u32 len,
+ * len includes itself, value may be empty (in which case its len is 4)
+ */
+};
+
+struct properties_header {
+ u32 len;
+ u32 version;
+ u32 dev_count;
+ struct dev_header dev_header[];
+};
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+ struct device *dev, const void *ptr,
+ struct property_entry entry[])
+{
+ int i;
+
+ for (i = 0; i < dev_header->prop_count; i++) {
+ int remaining = dev_header->len - (ptr - (void *)dev_header);
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
+ char *key;
+
+ if (sizeof(key_len) > remaining)
+ break;
+
+ key_len = *(typeof(key_len) *)ptr;
+ if (key_len + sizeof(val_len) > remaining ||
+ key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+ *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+ dev_err(dev, "invalid property name len at %#zx\n",
+ ptr - (void *)dev_header);
+ break;
+ }
+
+ val_len = *(typeof(val_len) *)(ptr + key_len);
+ if (key_len + val_len > remaining ||
+ val_len < sizeof(val_len)) {
+ dev_err(dev, "invalid property val len at %#zx\n",
+ ptr - (void *)dev_header + key_len);
+ break;
+ }
+
+ /* 4 bytes to accommodate UTF-8 code points + null byte */
+ key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+ if (!key) {
+ dev_err(dev, "cannot allocate property name\n");
+ break;
+ }
+ ucs2_as_utf8(key, ptr + sizeof(key_len),
+ key_len - sizeof(key_len));
+
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
+ if (dump_properties) {
+ dev_info(dev, "property: %s\n", key);
+ print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, entry_data, entry_len, true);
+ }
+
+ ptr += key_len + val_len;
+ }
+
+ if (i != dev_header->prop_count) {
+ dev_err(dev, "got %d device properties, expected %u\n", i,
+ dev_header->prop_count);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ return;
+ }
+
+ dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+ size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+ while (offset + sizeof(struct dev_header) < properties->len) {
+ struct dev_header *dev_header = (void *)properties + offset;
+ struct property_entry *entry = NULL;
+ const struct efi_dev_path *ptr;
+ struct device *dev;
+ size_t len;
+ int ret, i;
+
+ if (offset + dev_header->len > properties->len ||
+ dev_header->len <= sizeof(*dev_header)) {
+ pr_err("invalid len in dev_header at %#zx\n", offset);
+ return -EINVAL;
+ }
+
+ ptr = dev_header->path;
+ len = dev_header->len - sizeof(*dev_header);
+
+ dev = efi_get_device_by_path(&ptr, &len);
+ if (IS_ERR(dev)) {
+ pr_err("device path parse error %ld at %#zx:\n",
+ PTR_ERR(dev), (void *)ptr - (void *)dev_header);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ dev = NULL;
+ goto skip_device;
+ }
+
+ entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry) {
+ dev_err(dev, "cannot allocate properties\n");
+ goto skip_device;
+ }
+
+ unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+ if (!entry[0].name)
+ goto skip_device;
+
+ ret = device_create_managed_software_node(dev, entry, NULL);
+ if (ret)
+ dev_err(dev, "error %d assigning properties\n", ret);
+
+ for (i = 0; entry[i].name; i++)
+ kfree(entry[i].name);
+
+skip_device:
+ kfree(entry);
+ put_device(dev);
+ offset += dev_header->len;
+ }
+
+ return 0;
+}
+
+static int __init map_properties(void)
+{
+ struct properties_header *properties;
+ struct setup_data *data;
+ u32 data_len;
+ u64 pa_data;
+ int ret;
+
+ if (!x86_apple_machine)
+ return 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+ if (!data) {
+ pr_err("cannot map setup_data header\n");
+ return -ENOMEM;
+ }
+
+ if (data->type != SETUP_APPLE_PROPERTIES) {
+ pa_data = data->next;
+ memunmap(data);
+ continue;
+ }
+
+ data_len = data->len;
+ memunmap(data);
+
+ data = memremap(pa_data, sizeof(*data) + data_len, MEMREMAP_WB);
+ if (!data) {
+ pr_err("cannot map setup_data payload\n");
+ return -ENOMEM;
+ }
+
+ properties = (struct properties_header *)data->data;
+ if (properties->version != 1) {
+ pr_err("unsupported version:\n");
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -ENOTSUPP;
+ } else if (properties->len != data_len) {
+ pr_err("length mismatch, expected %u\n", data_len);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -EINVAL;
+ } else
+ ret = unmarshal_devices(properties);
+
+ /*
+ * Can only free the setup_data payload but not its header
+ * to avoid breaking the chain of ->next pointers.
+ */
+ data->len = 0;
+ memunmap(data);
+ memblock_free_late(pa_data + sizeof(*data), data_len);
+
+ return ret;
+ }
+ return 0;
+}
+
+fs_initcall(map_properties);
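
The properties assigned here end up as ordinary device properties, so a bound driver can query them through the generic property API mentioned at the top of the file. A minimal sketch of a consumer, assuming made-up property names used purely for illustration:

    #include <linux/device.h>
    #include <linux/property.h>

    static void example_read_apple_props(struct device *dev)
    {
    	u8 buf[4];

    	/* Boolean properties are simply present or absent. */
    	if (device_property_present(dev, "example-flag"))
    		dev_info(dev, "example-flag is set\n");

    	/* u8 array properties are read into a caller-supplied buffer;
    	 * the requested length must not exceed the stored array length. */
    	if (!device_property_read_u8_array(dev, "example-bytes",
    					   buf, sizeof(buf)))
    		dev_info(dev, "first byte: %#x\n", buf[0]);
    }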
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
new file mode 100644
index 0000000000..83f5bb57fa
--- /dev/null
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+
+#if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS)
+#include <asm/ptdump.h>
+
+static struct ptdump_info efi_ptdump_info = {
+ .mm = &efi_mm,
+ .markers = (struct addr_marker[]){
+ { 0, "UEFI runtime start" },
+ { EFI_RUNTIME_MAP_END, "UEFI runtime end" },
+ { -1, NULL }
+ },
+ .base_addr = 0,
+};
+
+static int __init ptdump_init(void)
+{
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+
+ return 0;
+}
+device_initcall(ptdump_init);
+
+#endif
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == U64_MAX)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (ret) {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init arm_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+ efi_set_pgd(current->active_mm);
+ preempt_enable();
+}
+
+
+static int __init arm_dmi_init(void)
+{
+ /*
+ * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
+ * be called early because dmi_id_init(), which is an arch_initcall
+ * itself, depends on dmi_scan_machine() having been called already.
+ */
+ dmi_setup();
+ return 0;
+}
+core_initcall(arm_dmi_init);
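
Code elsewhere in the kernel that depends on the runtime services set up above can gate itself on the EFI_RUNTIME_SERVICES flag, which arm_enable_runtime_services() sets once the virtual mappings are in place. A minimal sketch, with a made-up initcall name:

    #include <linux/efi.h>
    #include <linux/errno.h>
    #include <linux/init.h>

    static int __init example_uses_efi_runtime(void)
    {
    	/* Set at the end of arm_enable_runtime_services() above. */
    	if (!efi_enabled(EFI_RUNTIME_SERVICES))
    		return -ENODEV;

    	/* EFI runtime services may be called from this point on. */
    	return 0;
    }
    device_initcall(example_uses_efi_runtime);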
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 0000000000..3e8d4b51a8
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ * In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ * to cease processing data in subsequent write(2) calls until close(2)
+ * is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+ while (cap_info->index > 0)
+ __free_page(cap_info->pages[--cap_info->index]);
+
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+int __efi_capsule_setup_info(struct capsule_info *cap_info)
+{
+ size_t pages_needed;
+ int ret;
+ void *temp_page;
+
+ pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
+
+ if (pages_needed == 0) {
+ pr_err("invalid capsule size\n");
+ return -EINVAL;
+ }
+
+ /* Check if the capsule binary is supported */
+ ret = efi_capsule_supported(cap_info->header.guid,
+ cap_info->header.flags,
+ cap_info->header.imagesize,
+ &cap_info->reset_type);
+ if (ret) {
+ pr_err("capsule not supported\n");
+ return ret;
+ }
+
+ temp_page = krealloc(cap_info->pages,
+ pages_needed * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->pages = temp_page;
+
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
+ return 0;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the efi capsule header in the binary and
+ * set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: a mapped first page buffer pointer
+ * @hdr_bytes: the total received number of bytes for efi header
+ *
+ * Platforms with non-standard capsule update mechanisms can override
+ * this __weak function so they can perform any required capsule
+ * image munging. See quark_quirk_function() for an example.
+ **/
+int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes)
+{
+ /* Only process data block that is larger than efi header size */
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+ cap_info->total_size = cap_info->header.imagesize;
+
+ return __efi_capsule_setup_info(cap_info);
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update API once binary
+ * upload done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+ bool do_vunmap = false;
+ int ret;
+
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
+ if (ret) {
+ pr_err("capsule update failed\n");
+ return ret;
+ }
+
+ /* Indicate capsule binary uploading is done */
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+
+ if (cap_info->header.flags & EFI_CAPSULE_PERSIST_ACROSS_RESET) {
+ pr_info("Successfully uploaded capsule file with reboot type '%s'\n",
+ !cap_info->reset_type ? "RESET_COLD" :
+ cap_info->reset_type == 1 ? "RESET_WARM" :
+ "RESET_SHUTDOWN");
+ } else {
+ pr_info("Successfully processed capsule file\n");
+ }
+
+ return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ * efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ * Expectation:
+ * - A user space tool should start at the beginning of the capsule binary
+ * and pass data in sequentially.
+ * - Users should close and re-open this file node in order to upload more
+ * capsules.
+ * - After an error is returned, the user should close the file and restart
+ * the operation for the next try; otherwise -EIO will be returned until
+ * the file is closed.
+ * - An EFI capsule header must be located at the beginning of the capsule
+ * binary file and passed in as the first block of data of the write
+ * operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+ size_t count, loff_t *offp)
+{
+ int ret;
+ struct capsule_info *cap_info = file->private_data;
+ struct page *page;
+ void *kbuff = NULL;
+ size_t write_byte;
+
+ if (count == 0)
+ return 0;
+
+ /* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+ if (cap_info->index < 0)
+ return -EIO;
+
+ /* Only alloc a new page when previous page is full */
+ if (!cap_info->page_bytes_remain) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
+ cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
+ } else {
+ page = cap_info->pages[cap_info->index - 1];
+ }
+
+ kbuff = kmap(page);
+ kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+ /* Copy capsule binary data from user space to kernel space buffer */
+ write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+ if (copy_from_user(kbuff, buff, write_byte)) {
+ ret = -EFAULT;
+ goto fail_unmap;
+ }
+ cap_info->page_bytes_remain -= write_byte;
+
+ /* Setup capsule binary info structure */
+ if (cap_info->header.headersize == 0) {
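+ /*
+ * While the header hasn't been parsed yet we are still filling the
+ * first data page, so kbuff - cap_info->count points back to the
+ * very start of the capsule where the EFI capsule header lives.
+ */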
+ ret = efi_capsule_setup_info(cap_info, kbuff - cap_info->count,
+ cap_info->count + write_byte);
+ if (ret)
+ goto fail_unmap;
+ }
+
+ cap_info->count += write_byte;
+ kunmap(page);
+
+ /* Submit the full binary to efi_capsule_update() API */
+ if (cap_info->header.headersize > 0 &&
+ cap_info->count >= cap_info->total_size) {
+ if (cap_info->count > cap_info->total_size) {
+ pr_err("capsule upload size exceeded header defined size\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = efi_capsule_submit_update(cap_info);
+ if (ret)
+ goto failed;
+ }
+
+ return write_byte;
+
+fail_unmap:
+ kunmap(page);
+failed:
+ efi_free_all_buff_pages(cap_info);
+ return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ * We will not free successfully submitted pages since efi update
+ * requires data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info = file->private_data;
+
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
+ kfree(cap_info->pages);
+ kfree(cap_info->phys);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ * Allocates a separate capsule_info structure for each file open call.
+ * This allows multiple files to be open at once, so one user does not
+ * need to wait for others to finish before uploading their own capsule
+ * binary.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info;
+
+ cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+ if (!cap_info)
+ return -ENOMEM;
+
+ cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->pages) {
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ file->private_data = cap_info;
+
+ return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+ .owner = THIS_MODULE,
+ .open = efi_capsule_open,
+ .write = efi_capsule_write,
+ .release = efi_capsule_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efi_capsule_loader",
+ .fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = misc_register(&efi_capsule_misc);
+ if (ret)
+ pr_err("Unable to register capsule loader device\n");
+
+ return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+ misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 0000000000..7684302936
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/efi.h>
+#include <asm/io.h>
+
+typedef struct {
+ u64 length;
+ u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to capsule_pending, efi_reset_type
+ * and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function will race with callers of efi_capsule_update(), for
+ * example, calling this function while somebody else is in
+ * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
+ * will miss the updates to capsule_pending and efi_reset_type after
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+ if (!capsule_pending)
+ return false;
+
+ if (reset_type)
+ *reset_type = efi_reset_type;
+
+ return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK \
+ (EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+ efi_capsule_header_t capsule;
+ efi_capsule_header_t *cap_list[] = { &capsule };
+ efi_status_t status;
+ u64 max_size;
+
+ if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+ return -EINVAL;
+
+ capsule.headersize = capsule.imagesize = sizeof(capsule);
+ memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+ capsule.flags = flags;
+
+ status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+ if (status != EFI_SUCCESS)
+ return efi_status_to_err(status);
+
+ if (size > max_size)
+ return -ENOSPC;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+ return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
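+
+/*
+ * Illustrative example: with 4 KiB pages and 16-byte block descriptors,
+ * SGLIST_PER_PAGE is 4096 / 16 - 1 = 255, so a capsule spanning 256 data
+ * pages (1 MiB) needs sg_pages_num(256) = DIV_ROUND_UP(256, 255) = 2
+ * scatter gather list pages.
+ */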
+
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * Since this function must be called under capsule_mutex, check
+ * whether efi_reset_type will conflict with @reset, and atomically
+ * set it and capsule_pending if a capsule was successfully sent to
+ * the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+ struct page **sg_pages, int reset)
+{
+ efi_physical_addr_t sglist_phys;
+ efi_status_t status;
+
+ lockdep_assert_held(&capsule_mutex);
+
+ /*
+ * If someone has already registered a capsule that requires a
+ * different reset type, we're out of luck and must abort.
+ */
+ if (efi_reset_type >= 0 && efi_reset_type != reset) {
+ pr_err("Conflicting capsule reset type %d (%d).\n",
+ reset, efi_reset_type);
+ return -EINVAL;
+ }
+
+ /*
+ * If the system is getting ready to restart it may have
+ * called efi_capsule_pending() to make decisions (such as
+ * whether to force an EFI reboot), and we're racing against
+ * that call. Abort in that case.
+ */
+ if (unlikely(stop_capsules)) {
+ pr_warn("Capsule update raced with reboot, aborting.\n");
+ return -EINVAL;
+ }
+
+ sglist_phys = page_to_phys(sg_pages[0]);
+
+ status = efi.update_capsule(&capsule, 1, sglist_phys);
+ if (status == EFI_SUCCESS) {
+ capsule_pending = true;
+ efi_reset_type = reset;
+ }
+
+ return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
+ * the capsule guid, users will likely want to check that
+ * efi_capsule_supported() returns true before calling this function
+ * because it makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
+{
+ u32 imagesize = capsule->imagesize;
+ efi_guid_t guid = capsule->guid;
+ unsigned int count, sg_count;
+ u32 flags = capsule->flags;
+ struct page **sg_pages;
+ int rv, reset_type;
+ int i, j;
+
+ rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+ if (rv)
+ return rv;
+
+ count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+ sg_count = sg_pages_num(count);
+
+ sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
+ if (!sg_pages)
+ return -ENOMEM;
+
+ for (i = 0; i < sg_count; i++) {
+ sg_pages[i] = alloc_page(GFP_KERNEL);
+ if (!sg_pages[i]) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ efi_capsule_block_desc_t *sglist;
+
+ sglist = kmap_atomic(sg_pages[i]);
+
+ for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+ u64 sz = min_t(u64, imagesize,
+ PAGE_SIZE - (u64)*pages % PAGE_SIZE);
+
+ sglist[j].length = sz;
+ sglist[j].data = *pages++;
+
+ imagesize -= sz;
+ count--;
+ }
+
+ /* Continuation pointer */
+ sglist[j].length = 0;
+
+ if (i + 1 == sg_count)
+ sglist[j].data = 0;
+ else
+ sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ /*
+ * At runtime, the firmware has no way to find out where the
+ * sglist elements are mapped, if they are mapped in the first
+ * place. Therefore, on architectures that can only perform
+ * cache maintenance by virtual address, the firmware is unable
+ * to perform this maintenance, and so it is up to the OS to do
+ * it instead.
+ */
+ efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
+#endif
+ kunmap_atomic(sglist);
+ }
+
+ mutex_lock(&capsule_mutex);
+ rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+ mutex_unlock(&capsule_mutex);
+
+out:
+ for (i = 0; rv && i < sg_count; i++) {
+ if (sg_pages[i])
+ __free_page(sg_pages[i]);
+ }
+
+ kfree(sg_pages);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+ mutex_lock(&capsule_mutex);
+ stop_capsules = true;
+ mutex_unlock(&capsule_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+ .notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+ return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
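
A kernel-side caller follows the contract spelled out in the efi_capsule_update() kernel-doc: verify support first, then hand over the header together with the physical addresses of the data pages, and keep those pages alive if the call succeeds. A minimal sketch; the function name and the way the capsule data was assembled are made up:

    #include <linux/efi.h>

    static int example_submit_capsule(efi_capsule_header_t *capsule,
    				  phys_addr_t *phys_pages)
    {
    	int reset_type;
    	int ret;

    	/* Cheap pre-check so a friendlier error can be reported. */
    	ret = efi_capsule_supported(capsule->guid, capsule->flags,
    				    capsule->imagesize, &reset_type);
    	if (ret)
    		return ret;

    	ret = efi_capsule_update(capsule, phys_pages);
    	if (ret)
    		return ret;

    	/*
    	 * Success: the pages behind @phys_pages must now stay allocated,
    	 * and a reboot of @reset_type is needed to apply the update.
    	 */
    	return 0;
    }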
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 0000000000..fa9c1c3bf1
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s ", newpfx);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
new file mode 100644
index 0000000000..438ed9eff6
--- /dev/null
+++ b/drivers/firmware/efi/cper-x86.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Advanced Micro Devices, Inc.
+
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * We don't need a "CPER_IA" prefix since these are all locally defined.
+ * This will save us a lot of line space.
+ */
+#define VALID_LAPIC_ID BIT_ULL(0)
+#define VALID_CPUID_INFO BIT_ULL(1)
+#define VALID_PROC_ERR_INFO_NUM(bits) (((bits) & GENMASK_ULL(7, 2)) >> 2)
+#define VALID_PROC_CXT_INFO_NUM(bits) (((bits) & GENMASK_ULL(13, 8)) >> 8)
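+
+/*
+ * Illustrative example: validation_bits = 0x10b decodes as LAPIC ID and
+ * CPUID info valid, VALID_PROC_ERR_INFO_NUM() = 2 error information
+ * structures and VALID_PROC_CXT_INFO_NUM() = 1 context structure
+ * following the section.
+ */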
+
+#define INFO_ERR_STRUCT_TYPE_CACHE \
+ GUID_INIT(0xA55701F5, 0xE3EF, 0x43DE, 0xAC, 0x72, 0x24, 0x9B, \
+ 0x57, 0x3F, 0xAD, 0x2C)
+#define INFO_ERR_STRUCT_TYPE_TLB \
+ GUID_INIT(0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B, \
+ 0x9A, 0xDB, 0x63, 0xC3)
+#define INFO_ERR_STRUCT_TYPE_BUS \
+ GUID_INIT(0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF, \
+ 0x92, 0xFF, 0xA6, 0x3C)
+#define INFO_ERR_STRUCT_TYPE_MS \
+ GUID_INIT(0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5, \
+ 0xB0, 0xA7, 0x43, 0x14)
+
+#define INFO_VALID_CHECK_INFO BIT_ULL(0)
+#define INFO_VALID_TARGET_ID BIT_ULL(1)
+#define INFO_VALID_REQUESTOR_ID BIT_ULL(2)
+#define INFO_VALID_RESPONDER_ID BIT_ULL(3)
+#define INFO_VALID_IP BIT_ULL(4)
+
+#define CHECK_VALID_TRANS_TYPE BIT_ULL(0)
+#define CHECK_VALID_OPERATION BIT_ULL(1)
+#define CHECK_VALID_LEVEL BIT_ULL(2)
+#define CHECK_VALID_PCC BIT_ULL(3)
+#define CHECK_VALID_UNCORRECTED BIT_ULL(4)
+#define CHECK_VALID_PRECISE_IP BIT_ULL(5)
+#define CHECK_VALID_RESTARTABLE_IP BIT_ULL(6)
+#define CHECK_VALID_OVERFLOW BIT_ULL(7)
+
+#define CHECK_VALID_BUS_PART_TYPE BIT_ULL(8)
+#define CHECK_VALID_BUS_TIME_OUT BIT_ULL(9)
+#define CHECK_VALID_BUS_ADDR_SPACE BIT_ULL(10)
+
+#define CHECK_VALID_BITS(check) (((check) & GENMASK_ULL(15, 0)))
+#define CHECK_TRANS_TYPE(check) (((check) & GENMASK_ULL(17, 16)) >> 16)
+#define CHECK_OPERATION(check) (((check) & GENMASK_ULL(21, 18)) >> 18)
+#define CHECK_LEVEL(check) (((check) & GENMASK_ULL(24, 22)) >> 22)
+#define CHECK_PCC BIT_ULL(25)
+#define CHECK_UNCORRECTED BIT_ULL(26)
+#define CHECK_PRECISE_IP BIT_ULL(27)
+#define CHECK_RESTARTABLE_IP BIT_ULL(28)
+#define CHECK_OVERFLOW BIT_ULL(29)
+
+#define CHECK_BUS_PART_TYPE(check) (((check) & GENMASK_ULL(31, 30)) >> 30)
+#define CHECK_BUS_TIME_OUT BIT_ULL(32)
+#define CHECK_BUS_ADDR_SPACE(check) (((check) & GENMASK_ULL(34, 33)) >> 33)
+
+#define CHECK_VALID_MS_ERR_TYPE BIT_ULL(0)
+#define CHECK_VALID_MS_PCC BIT_ULL(1)
+#define CHECK_VALID_MS_UNCORRECTED BIT_ULL(2)
+#define CHECK_VALID_MS_PRECISE_IP BIT_ULL(3)
+#define CHECK_VALID_MS_RESTARTABLE_IP BIT_ULL(4)
+#define CHECK_VALID_MS_OVERFLOW BIT_ULL(5)
+
+#define CHECK_MS_ERR_TYPE(check) (((check) & GENMASK_ULL(18, 16)) >> 16)
+#define CHECK_MS_PCC BIT_ULL(19)
+#define CHECK_MS_UNCORRECTED BIT_ULL(20)
+#define CHECK_MS_PRECISE_IP BIT_ULL(21)
+#define CHECK_MS_RESTARTABLE_IP BIT_ULL(22)
+#define CHECK_MS_OVERFLOW BIT_ULL(23)
+
+#define CTX_TYPE_MSR 1
+#define CTX_TYPE_MMREG 7
+
+enum err_types {
+ ERR_TYPE_CACHE = 0,
+ ERR_TYPE_TLB,
+ ERR_TYPE_BUS,
+ ERR_TYPE_MS,
+ N_ERR_TYPES
+};
+
+static enum err_types cper_get_err_type(const guid_t *err_type)
+{
+ if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_CACHE))
+ return ERR_TYPE_CACHE;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_TLB))
+ return ERR_TYPE_TLB;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_BUS))
+ return ERR_TYPE_BUS;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_MS))
+ return ERR_TYPE_MS;
+ else
+ return N_ERR_TYPES;
+}
+
+static const char * const ia_check_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const ia_check_op_strs[] = {
+ "generic error",
+ "generic read",
+ "generic write",
+ "data read",
+ "data write",
+ "instruction fetch",
+ "prefetch",
+ "eviction",
+ "snoop",
+};
+
+static const char * const ia_check_bus_part_type_strs[] = {
+ "Local Processor originated request",
+ "Local Processor responded to request",
+ "Local Processor observed",
+ "Generic",
+};
+
+static const char * const ia_check_bus_addr_space_strs[] = {
+ "Memory Access",
+ "Reserved",
+ "I/O",
+ "Other Transaction",
+};
+
+static const char * const ia_check_ms_error_type_strs[] = {
+ "No Error",
+ "Unclassified",
+ "Microcode ROM Parity Error",
+ "External Error",
+ "FRC Error",
+ "Internal Unclassified",
+};
+
+static const char * const ia_reg_ctx_strs[] = {
+ "Unclassified Data",
+ "MSR Registers (Machine Check and other MSRs)",
+ "32-bit Mode Execution Context",
+ "64-bit Mode Execution Context",
+ "FXSAVE Context",
+ "32-bit Mode Debug Registers (DR0-DR7)",
+ "64-bit Mode Debug Registers (DR0-DR7)",
+ "Memory Mapped Registers",
+};
+
+static inline void print_bool(char *str, const char *pfx, u64 check, u64 bit)
+{
+ printk("%s%s: %s\n", pfx, str, (check & bit) ? "true" : "false");
+}
+
+static void print_err_info_ms(const char *pfx, u16 validation_bits, u64 check)
+{
+ if (validation_bits & CHECK_VALID_MS_ERR_TYPE) {
+ u8 err_type = CHECK_MS_ERR_TYPE(check);
+
+ printk("%sError Type: %u, %s\n", pfx, err_type,
+ err_type < ARRAY_SIZE(ia_check_ms_error_type_strs) ?
+ ia_check_ms_error_type_strs[err_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_MS_PCC)
+ print_bool("Processor Context Corrupt", pfx, check, CHECK_MS_PCC);
+
+ if (validation_bits & CHECK_VALID_MS_UNCORRECTED)
+ print_bool("Uncorrected", pfx, check, CHECK_MS_UNCORRECTED);
+
+ if (validation_bits & CHECK_VALID_MS_PRECISE_IP)
+ print_bool("Precise IP", pfx, check, CHECK_MS_PRECISE_IP);
+
+ if (validation_bits & CHECK_VALID_MS_RESTARTABLE_IP)
+ print_bool("Restartable IP", pfx, check, CHECK_MS_RESTARTABLE_IP);
+
+ if (validation_bits & CHECK_VALID_MS_OVERFLOW)
+ print_bool("Overflow", pfx, check, CHECK_MS_OVERFLOW);
+}
+
+static void print_err_info(const char *pfx, u8 err_type, u64 check)
+{
+ u16 validation_bits = CHECK_VALID_BITS(check);
+
+ /*
+ * The MS Check structure varies a lot from the others, so use a
+ * separate function for decoding.
+ */
+ if (err_type == ERR_TYPE_MS)
+ return print_err_info_ms(pfx, validation_bits, check);
+
+ if (validation_bits & CHECK_VALID_TRANS_TYPE) {
+ u8 trans_type = CHECK_TRANS_TYPE(check);
+
+ printk("%sTransaction Type: %u, %s\n", pfx, trans_type,
+ trans_type < ARRAY_SIZE(ia_check_trans_type_strs) ?
+ ia_check_trans_type_strs[trans_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_OPERATION) {
+ u8 op = CHECK_OPERATION(check);
+
+ /*
+ * CACHE has more operation types than TLB or BUS, though the names
+ * and their order are the same.
+ */
+ u8 max_ops = (err_type == ERR_TYPE_CACHE) ? 9 : 7;
+
+ printk("%sOperation: %u, %s\n", pfx, op,
+ op < max_ops ? ia_check_op_strs[op] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_LEVEL)
+ printk("%sLevel: %llu\n", pfx, CHECK_LEVEL(check));
+
+ if (validation_bits & CHECK_VALID_PCC)
+ print_bool("Processor Context Corrupt", pfx, check, CHECK_PCC);
+
+ if (validation_bits & CHECK_VALID_UNCORRECTED)
+ print_bool("Uncorrected", pfx, check, CHECK_UNCORRECTED);
+
+ if (validation_bits & CHECK_VALID_PRECISE_IP)
+ print_bool("Precise IP", pfx, check, CHECK_PRECISE_IP);
+
+ if (validation_bits & CHECK_VALID_RESTARTABLE_IP)
+ print_bool("Restartable IP", pfx, check, CHECK_RESTARTABLE_IP);
+
+ if (validation_bits & CHECK_VALID_OVERFLOW)
+ print_bool("Overflow", pfx, check, CHECK_OVERFLOW);
+
+ if (err_type != ERR_TYPE_BUS)
+ return;
+
+ if (validation_bits & CHECK_VALID_BUS_PART_TYPE) {
+ u8 part_type = CHECK_BUS_PART_TYPE(check);
+
+ printk("%sParticipation Type: %u, %s\n", pfx, part_type,
+ part_type < ARRAY_SIZE(ia_check_bus_part_type_strs) ?
+ ia_check_bus_part_type_strs[part_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_BUS_TIME_OUT)
+ print_bool("Time Out", pfx, check, CHECK_BUS_TIME_OUT);
+
+ if (validation_bits & CHECK_VALID_BUS_ADDR_SPACE) {
+ u8 addr_space = CHECK_BUS_ADDR_SPACE(check);
+
+ printk("%sAddress Space: %u, %s\n", pfx, addr_space,
+ addr_space < ARRAY_SIZE(ia_check_bus_addr_space_strs) ?
+ ia_check_bus_addr_space_strs[addr_space] : "unknown");
+ }
+}
+
+void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
+{
+ int i;
+ struct cper_ia_err_info *err_info;
+ struct cper_ia_proc_ctx *ctx_info;
+ char newpfx[64], infopfx[64];
+ u8 err_type;
+
+ if (proc->validation_bits & VALID_LAPIC_ID)
+ printk("%sLocal APIC_ID: 0x%llx\n", pfx, proc->lapic_id);
+
+ if (proc->validation_bits & VALID_CPUID_INFO) {
+ printk("%sCPUID Info:\n", pfx);
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, proc->cpuid,
+ sizeof(proc->cpuid), 0);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ err_info = (struct cper_ia_err_info *)(proc + 1);
+ for (i = 0; i < VALID_PROC_ERR_INFO_NUM(proc->validation_bits); i++) {
+ printk("%sError Information Structure %d:\n", pfx, i);
+
+ err_type = cper_get_err_type(&err_info->err_type);
+ printk("%sError Structure Type: %s\n", newpfx,
+ err_type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_type] : "unknown");
+
+ if (err_type >= N_ERR_TYPES) {
+ printk("%sError Structure Type: %pUl\n", newpfx,
+ &err_info->err_type);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_CHECK_INFO) {
+ printk("%sCheck Information: 0x%016llx\n", newpfx,
+ err_info->check_info);
+
+ if (err_type < N_ERR_TYPES) {
+ snprintf(infopfx, sizeof(infopfx), "%s ",
+ newpfx);
+
+ print_err_info(infopfx, err_type,
+ err_info->check_info);
+ }
+ }
+
+ if (err_info->validation_bits & INFO_VALID_TARGET_ID) {
+ printk("%sTarget Identifier: 0x%016llx\n",
+ newpfx, err_info->target_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_REQUESTOR_ID) {
+ printk("%sRequestor Identifier: 0x%016llx\n",
+ newpfx, err_info->requestor_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_RESPONDER_ID) {
+ printk("%sResponder Identifier: 0x%016llx\n",
+ newpfx, err_info->responder_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_IP) {
+ printk("%sInstruction Pointer: 0x%016llx\n",
+ newpfx, err_info->ip);
+ }
+
+ err_info++;
+ }
+
+ ctx_info = (struct cper_ia_proc_ctx *)err_info;
+ for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
+ int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int groupsize = 4;
+
+ printk("%sContext Information Structure %d:\n", pfx, i);
+
+ printk("%sRegister Context Type: %s\n", newpfx,
+ ctx_info->reg_ctx_type < ARRAY_SIZE(ia_reg_ctx_strs) ?
+ ia_reg_ctx_strs[ctx_info->reg_ctx_type] : "unknown");
+
+ printk("%sRegister Array Size: 0x%04x\n", newpfx,
+ ctx_info->reg_arr_size);
+
+ if (ctx_info->reg_ctx_type == CTX_TYPE_MSR) {
+ groupsize = 8; /* MSRs are 8 bytes wide. */
+ printk("%sMSR Address: 0x%08x\n", newpfx,
+ ctx_info->msr_addr);
+ }
+
+ if (ctx_info->reg_ctx_type == CTX_TYPE_MMREG) {
+ printk("%sMM Register Address: 0x%016llx\n", newpfx,
+ ctx_info->mm_reg_addr);
+ }
+
+ if (ctx_info->reg_ctx_type != CTX_TYPE_MSR ||
+ arch_apei_report_x86_error(ctx_info, proc->lapic_id)) {
+ printk("%sRegister Array:\n", newpfx);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16,
+ groupsize, (ctx_info + 1),
+ ctx_info->reg_arr_size, 0);
+ }
+
+ ctx_info = (struct cper_ia_proc_ctx *)((long)ctx_info + size);
+ }
+}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
new file mode 100644
index 0000000000..35c37f6677
--- /dev/null
+++ b/drivers/firmware/efi/cper.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors in
+ * various tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+#include "cper_cxl.h"
+
+/*
+ * CPER record IDs need to be unique even across reboots, because the
+ * record ID is used as an index for ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
+ */
+ if (time < 0x80000000)
+ atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
+ else
+ atomic64_set(&seq, 0x8000000000000000ull |
+ ktime_get_real_seconds() << 24);
+ }
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
+
+static const char * const severity_strs[] = {
+ "recoverable",
+ "fatal",
+ "corrected",
+ "info",
+};
+
+const char *cper_severity_str(unsigned int severity)
+{
+ return severity < ARRAY_SIZE(severity_strs) ?
+ severity_strs[severity] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_severity_str);
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: size of the string array @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length is longer than 80 characters, multiple lines will
+ * be printed, with @pfx printed at the beginning of each line.
+ */
+void cper_print_bits(const char *pfx, unsigned int bits,
+ const char * const strs[], unsigned int strs_size)
+{
+ int i, len = 0;
+ const char *str;
+ char buf[84];
+
+ for (i = 0; i < strs_size; i++) {
+ if (!(bits & (1U << i)))
+ continue;
+ str = strs[i];
+ if (!str)
+ continue;
+ if (len && len + strlen(str) + 2 > 80) {
+ printk("%s\n", buf);
+ len = 0;
+ }
+ if (!len)
+ len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+ else
+ len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
+ }
+ if (len)
+ printk("%s\n", buf);
+}
+
+static const char * const proc_type_strs[] = {
+ "IA32/X64",
+ "IA64",
+ "ARM",
+};
+
+static const char * const proc_isa_strs[] = {
+ "IA32",
+ "IA64",
+ "X64",
+ "ARM A32/T32",
+ "ARM A64",
+};
+
+const char * const cper_proc_error_type_strs[] = {
+ "cache error",
+ "TLB error",
+ "bus error",
+ "micro-architectural error",
+};
+
+static const char * const proc_op_strs[] = {
+ "unknown or generic",
+ "data read",
+ "data write",
+ "instruction execution",
+};
+
+static const char * const proc_flag_strs[] = {
+ "restartable",
+ "precise IP",
+ "overflow",
+ "corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+ const struct cper_sec_proc_generic *proc)
+{
+ if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+ printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+ proc->proc_type < ARRAY_SIZE(proc_type_strs) ?
+ proc_type_strs[proc->proc_type] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ISA)
+ printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+ proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ?
+ proc_isa_strs[proc->proc_isa] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+ printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+ cper_print_bits(pfx, proc->proc_error_type,
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+ printk("%s""operation: %d, %s\n", pfx, proc->operation,
+ proc->operation < ARRAY_SIZE(proc_op_strs) ?
+ proc_op_strs[proc->operation] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+ printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+ cper_print_bits(pfx, proc->flags, proc_flag_strs,
+ ARRAY_SIZE(proc_flag_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+ printk("%s""level: %d\n", pfx, proc->level);
+ if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+ printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+ if (proc->validation_bits & CPER_PROC_VALID_ID)
+ printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+ if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+ printk("%s""target_address: 0x%016llx\n",
+ pfx, proc->target_addr);
+ if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+ printk("%s""requestor_id: 0x%016llx\n",
+ pfx, proc->requestor_id);
+ if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+ printk("%s""responder_id: 0x%016llx\n",
+ pfx, proc->responder_id);
+ if (proc->validation_bits & CPER_PROC_VALID_IP)
+ printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char * const mem_err_type_strs[] = {
+ "unknown",
+ "no error",
+ "single-bit ECC",
+ "multi-bit ECC",
+ "single-symbol chipkill ECC",
+ "multi-symbol chipkill ECC",
+ "master abort",
+ "target abort",
+ "parity error",
+ "watchdog timeout",
+ "invalid address",
+ "mirror Broken",
+ "memory sparing",
+ "scrub corrected error",
+ "scrub uncorrected error",
+ "physical memory map-out event",
+};
+
+const char *cper_mem_err_type_str(unsigned int etype)
+{
+ return etype < ARRAY_SIZE(mem_err_type_strs) ?
+ mem_err_type_strs[etype] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
+
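+/*
+ * Bits 15:8 of the memory error status field select the error-type string
+ * below; the remaining status bits are not decoded here.
+ */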
+const char *cper_mem_err_status_str(u64 status)
+{
+ switch ((status >> 8) & 0xff) {
+ case 1: return "Error detected internal to the component";
+ case 4: return "Storage error in DRAM memory";
+ case 5: return "Storage error in TLB";
+ case 6: return "Storage error in cache";
+ case 7: return "Error in one or more functional units";
+ case 8: return "Component failed self test";
+ case 9: return "Overflow or undervalue of internal queue";
+ case 16: return "Error detected in the bus";
+ case 17: return "Virtual address not found on IO-TLB or IO-PDIR";
+ case 18: return "Improper access error";
+ case 19: return "Access to a memory address which is not mapped to any component";
+ case 20: return "Loss of Lockstep";
+ case 21: return "Response not associated with a request";
+ case 22: return "Bus parity error - must also set the A, C, or D Bits";
+ case 23: return "Detection of a protocol error";
+ case 24: return "Detection of a PATH_ERROR";
+ case 25: return "Bus operation timeout";
+ case 26: return "A read was issued to data that has been poisoned";
+ default: return "Reserved";
+ }
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_status_str);
+
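+/*
+ * Decode the validated location fields of @mem into @msg (at most
+ * CPER_REC_LEN bytes). As an illustration (values hypothetical), a record
+ * with the node, card, module and column fields valid decodes to
+ * "node:0 card:1 module:2 column:8 ".
+ */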
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+ u32 len, n;
+
+ if (!msg)
+ return 0;
+
+ n = 0;
+ len = CPER_REC_LEN;
+ if (mem->validation_bits & CPER_MEM_VALID_NODE)
+ n += scnprintf(msg + n, len - n, "node:%d ", mem->node);
+ if (mem->validation_bits & CPER_MEM_VALID_CARD)
+ n += scnprintf(msg + n, len - n, "card:%d ", mem->card);
+ if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+ n += scnprintf(msg + n, len - n, "module:%d ", mem->module);
+ if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ n += scnprintf(msg + n, len - n, "rank:%d ", mem->rank);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK)
+ n += scnprintf(msg + n, len - n, "bank:%d ", mem->bank);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP)
+ n += scnprintf(msg + n, len - n, "bank_group:%d ",
+ mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
+ n += scnprintf(msg + n, len - n, "bank_address:%d ",
+ mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
+ if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+ n += scnprintf(msg + n, len - n, "device:%d ", mem->device);
+ if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
+ u32 row = mem->row;
+
+ row |= cper_get_mem_extension(mem->validation_bits, mem->extended);
+ n += scnprintf(msg + n, len - n, "row:%d ", row);
+ }
+ if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+ n += scnprintf(msg + n, len - n, "column:%d ", mem->column);
+ if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+ n += scnprintf(msg + n, len - n, "bit_position:%d ",
+ mem->bit_pos);
+ if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+ n += scnprintf(msg + n, len - n, "requestor_id:0x%016llx ",
+ mem->requestor_id);
+ if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+ n += scnprintf(msg + n, len - n, "responder_id:0x%016llx ",
+ mem->responder_id);
+ if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+ n += scnprintf(msg + n, len - n, "target_id:0x%016llx ",
+ mem->target_id);
+ if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
+ n += scnprintf(msg + n, len - n, "chip_id:%d ",
+ mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_location);
+
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+ u32 len, n;
+ const char *bank = NULL, *device = NULL;
+
+ if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ return 0;
+
+ len = CPER_REC_LEN;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank && device)
+ n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+ else
+ n = snprintf(msg, len,
+ "DIMM location: not present. DMI handle: 0x%.4x ",
+ mem->mem_dev_handle);
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(cper_dimm_err_location);
+
+void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
+ struct cper_mem_err_compact *cmem)
+{
+ cmem->validation_bits = mem->validation_bits;
+ cmem->node = mem->node;
+ cmem->card = mem->card;
+ cmem->module = mem->module;
+ cmem->bank = mem->bank;
+ cmem->device = mem->device;
+ cmem->row = mem->row;
+ cmem->column = mem->column;
+ cmem->bit_pos = mem->bit_pos;
+ cmem->requestor_id = mem->requestor_id;
+ cmem->responder_id = mem->responder_id;
+ cmem->target_id = mem->target_id;
+ cmem->extended = mem->extended;
+ cmem->rank = mem->rank;
+ cmem->mem_array_handle = mem->mem_array_handle;
+ cmem->mem_dev_handle = mem->mem_dev_handle;
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_pack);
+
+const char *cper_mem_err_unpack(struct trace_seq *p,
+ struct cper_mem_err_compact *cmem)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ char rcd_decode_str[CPER_REC_LEN];
+
+ if (cper_mem_err_location(cmem, rcd_decode_str))
+ trace_seq_printf(p, "%s", rcd_decode_str);
+ if (cper_dimm_err_location(cmem, rcd_decode_str))
+ trace_seq_printf(p, "%s", rcd_decode_str);
+ trace_seq_putc(p, '\0');
+
+ return ret;
+}
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+ int len)
+{
+ struct cper_mem_err_compact cmem;
+ char rcd_decode_str[CPER_REC_LEN];
+
+ /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+ if (len == sizeof(struct cper_sec_mem_err_old) &&
+ (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+ pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+ return;
+ }
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+ printk("%s error_status: %s (0x%016llx)\n",
+ pfx, cper_mem_err_status_str(mem->error_status),
+ mem->error_status);
+ if (mem->validation_bits & CPER_MEM_VALID_PA)
+ printk("%s""physical_address: 0x%016llx\n",
+ pfx, mem->physical_addr);
+ if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
+ printk("%s""physical_address_mask: 0x%016llx\n",
+ pfx, mem->physical_addr_mask);
+ cper_mem_err_pack(mem, &cmem);
+ if (cper_mem_err_location(&cmem, rcd_decode_str))
+ printk("%s%s\n", pfx, rcd_decode_str);
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ u8 etype = mem->error_type;
+ printk("%s""error_type: %d, %s\n", pfx, etype,
+ cper_mem_err_type_str(etype));
+ }
+ if (cper_dimm_err_location(&cmem, rcd_decode_str))
+ printk("%s%s\n", pfx, rcd_decode_str);
+}
+
+static const char * const pcie_port_type_strs[] = {
+ "PCIe end point",
+ "legacy PCI end point",
+ "unknown",
+ "unknown",
+ "root port",
+ "upstream switch port",
+ "downstream switch port",
+ "PCIe to PCI/PCI-X bridge",
+ "PCI/PCI-X to PCIe bridge",
+ "root complex integrated endpoint device",
+ "root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ const struct acpi_hest_generic_data *gdata)
+{
+ if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+ printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+ pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ?
+ pcie_port_type_strs[pcie->port_type] : "unknown");
+ if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+ printk("%s""version: %d.%d\n", pfx,
+ pcie->version.major, pcie->version.minor);
+ if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+ printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+ pcie->command, pcie->status);
+ if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+ const __u8 *p;
+ printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+ pcie->device_id.segment, pcie->device_id.bus,
+ pcie->device_id.device, pcie->device_id.function);
+ printk("%s""slot: %d\n", pfx,
+ pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+ printk("%s""secondary_bus: 0x%02x\n", pfx,
+ pcie->device_id.secondary_bus);
+ printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+ pcie->device_id.vendor_id, pcie->device_id.device_id);
+ p = pcie->device_id.class_code;
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
+ }
+ if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+ printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+ pcie->serial_number.lower, pcie->serial_number.upper);
+ if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+ printk(
+ "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+ pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+ /* Fatal errors call __ghes_panic() before AER handler prints this */
+ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+ (gdata->error_severity & CPER_SEV_FATAL)) {
+ struct aer_capability_regs *aer;
+
+ aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+ pfx, aer->uncor_status, aer->uncor_mask);
+ printk("%saer_uncor_severity: 0x%08x\n",
+ pfx, aer->uncor_severity);
+ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+ aer->header_log.dw0, aer->header_log.dw1,
+ aer->header_log.dw2, aer->header_log.dw3);
+ }
+}
+
+static const char * const fw_err_rec_type_strs[] = {
+ "IPF SAL Error Record",
+ "SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
+ "SOC Firmware Error Record Type2",
+};
+
+static void cper_print_fw_err(const char *pfx,
+ struct acpi_hest_generic_data *gdata,
+ const struct cper_sec_fw_err_rec_ref *fw_err)
+{
+ void *buf = acpi_hest_get_payload(gdata);
+ u32 offset, length = gdata->error_data_length;
+
+ printk("%s""Firmware Error Record Type: %s\n", pfx,
+ fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
+ fw_err_rec_type_strs[fw_err->record_type] : "unknown");
+ printk("%s""Revision: %d\n", pfx, fw_err->revision);
+
+ /* Record Type based on UEFI 2.7 */
+ if (fw_err->revision == 0) {
+ printk("%s""Record Identifier: %08llx\n", pfx,
+ fw_err->record_identifier);
+ } else if (fw_err->revision == 2) {
+ printk("%s""Record Identifier: %pUl\n", pfx,
+ &fw_err->record_identifier_guid);
+ }
+
+ /*
+ * The FW error record may contain trailing data beyond the
+ * structure defined by the specification. As the fields
+ * defined (and hence the offset of any trailing data) vary
+ * with the revision, set the offset to account for this
+ * variation.
+ */
+ if (fw_err->revision == 0) {
+ /* record_identifier_guid not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier_guid);
+ } else if (fw_err->revision == 1) {
+ /* record_identifier not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier);
+ } else {
+ offset = sizeof(*fw_err);
+ }
+
+ buf += offset;
+ length -= offset;
+
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
+}
+
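+/*
+ * The generic error data timestamp is eight BCD-coded bytes: seconds,
+ * minutes, hours, a precise/imprecise flag, day, month, year and century.
+ * For example (hypothetical values), the bytes
+ * { 0x30, 0x15, 0x14, 0x01, 0x05, 0x03, 0x24, 0x20 } decode to
+ * "precise stamp: 2024-03-05 14:15:30".
+ */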
+static void cper_print_tstamp(const char *pfx,
+ struct acpi_hest_generic_data_v300 *gdata)
+{
+ __u8 hour, min, sec, day, mon, year, century, *timestamp;
+
+ if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) {
+ timestamp = (__u8 *)&(gdata->time_stamp);
+ sec = bcd2bin(timestamp[0]);
+ min = bcd2bin(timestamp[1]);
+ hour = bcd2bin(timestamp[2]);
+ day = bcd2bin(timestamp[4]);
+ mon = bcd2bin(timestamp[5]);
+ year = bcd2bin(timestamp[6]);
+ century = bcd2bin(timestamp[7]);
+
+ printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx,
+ (timestamp[3] & 0x1 ? "precise " : "imprecise "),
+ century, year, mon, day, hour, min, sec);
+ }
+}
+
+static void
+cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
+ int sec_no)
+{
+ guid_t *sec_type = (guid_t *)gdata->section_type;
+ __u16 severity;
+ char newpfx[64];
+
+ if (acpi_hest_get_version(gdata) >= 3)
+ cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata);
+
+ severity = gdata->error_severity;
+ printk("%s""Error %d, type: %s\n", pfx, sec_no,
+ cper_severity_str(severity));
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+ printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id);
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+ printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+ if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
+ struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: general processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*proc_err))
+ cper_print_proc_generic(newpfx, proc_err);
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: memory error\n", newpfx);
+ if (gdata->error_data_length >=
+ sizeof(struct cper_sec_mem_err_old))
+ cper_print_mem(newpfx, mem_err,
+ gdata->error_data_length);
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: PCIe error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*pcie))
+ cper_print_pcie(newpfx, pcie, gdata);
+ else
+ goto err_section_too_small;
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: ARM processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*arm_err))
+ cper_print_proc_arm(newpfx, arm_err);
+ else
+ goto err_section_too_small;
+#endif
+#if defined(CONFIG_UEFI_CPER_X86)
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ struct cper_sec_proc_ia *ia_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: IA32/X64 processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*ia_err))
+ cper_print_proc_ia(newpfx, ia_err);
+ else
+ goto err_section_too_small;
+#endif
+ } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
+ struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: Firmware Error Record Reference\n",
+ newpfx);
+ /* The minimal FW Error Record contains 16 bytes */
+ if (gdata->error_data_length >= SZ_16)
+ cper_print_fw_err(newpfx, gdata, fw_err);
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+ struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: CXL Protocol Error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*prot_err))
+ cper_print_prot_err(newpfx, prot_err);
+ else
+ goto err_section_too_small;
+ } else {
+ const void *err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection type: unknown, %pUl\n", newpfx, sec_type);
+ printk("%ssection length: %#x\n", newpfx,
+ gdata->error_data_length);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err,
+ gdata->error_data_length, true);
+ }
+
+ return;
+
+err_section_too_small:
+ pr_err(FW_WARN "error section length is too small\n");
+}
+
+void cper_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ int sec_no = 0;
+ char newpfx[64];
+ __u16 severity;
+
+ severity = estatus->error_severity;
+ if (severity == CPER_SEV_CORRECTED)
+ printk("%s%s\n", pfx,
+ "It has been corrected by h/w "
+ "and requires no further action");
+ printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ apei_estatus_for_each_section(estatus, gdata) {
+ cper_estatus_print_section(newpfx, gdata, sec_no);
+ sec_no++;
+ }
+}
+EXPORT_SYMBOL_GPL(cper_estatus_print);
+
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check_header);
+
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, record_size;
+ int rc;
+
+ rc = cper_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+
+ data_len = estatus->data_length;
+
+ apei_estatus_for_each_section(estatus, gdata) {
+ if (acpi_hest_get_size(gdata) > data_len)
+ return -EINVAL;
+
+ record_size = acpi_hest_get_record_size(gdata);
+ if (record_size > data_len)
+ return -EINVAL;
+
+ data_len -= record_size;
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check);
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
new file mode 100644
index 0000000000..a55771b99a
--- /dev/null
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UEFI Common Platform Error Record (CPER) support for CXL Section.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+ */
+
+#include <linux/cper.h>
+#include "cper_cxl.h"
+
+#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
+static const char * const prot_err_agent_type_strs[] = {
+ "Restricted CXL Device",
+ "Restricted CXL Host Downstream Port",
+ "CXL Device",
+ "CXL Logical Device",
+ "CXL Fabric Manager managed Logical Device",
+ "CXL Root Port",
+ "CXL Downstream Switch Port",
+ "CXL Upstream Switch Port",
+};
+
+/*
+ * The layout of the enumeration and its values match the CXL Agent Type
+ * field in UEFI 2.10 Section N.2.13.
+ */
+enum {
+ RCD, /* Restricted CXL Device */
+ RCH_DP, /* Restricted CXL Host Downstream Port */
+ DEVICE, /* CXL Device */
+ LD, /* CXL Logical Device */
+ FMLD, /* CXL Fabric Manager managed Logical Device */
+ RP, /* CXL Root Port */
+ DSP, /* CXL Downstream Switch Port */
+ USP, /* CXL Upstream Switch Port */
+};
+
+void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
+{
+ if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
+ pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
+ prot_err->agent_type < ARRAY_SIZE(prot_err_agent_type_strs)
+ ? prot_err_agent_type_strs[prot_err->agent_type]
+ : "unknown");
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS) {
+ switch (prot_err->agent_type) {
+ /*
+ * According to UEFI 2.10 Section N.2.13, the term CXL Device
+ * is used to refer to Restricted CXL Device, CXL Device, CXL
+ * Logical Device or a CXL Fabric Manager Managed Logical
+ * Device.
+ */
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ pr_info("%s agent_address: %04x:%02x:%02x.%x\n",
+ pfx, prot_err->agent_addr.segment,
+ prot_err->agent_addr.bus,
+ prot_err->agent_addr.device,
+ prot_err->agent_addr.function);
+ break;
+ case RCH_DP:
+ pr_info("%s rcrb_base_address: 0x%016llx\n", pfx,
+ prot_err->agent_addr.rcrb_base_addr);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_DEVICE_ID) {
+ const __u8 *class_code;
+
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ pr_info("%s slot: %d\n", pfx,
+ prot_err->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+ pr_info("%s vendor_id: 0x%04x, device_id: 0x%04x\n",
+ pfx, prot_err->device_id.vendor_id,
+ prot_err->device_id.device_id);
+ pr_info("%s sub_vendor_id: 0x%04x, sub_device_id: 0x%04x\n",
+ pfx, prot_err->device_id.subsystem_vendor_id,
+ prot_err->device_id.subsystem_id);
+ class_code = prot_err->device_id.class_code;
+ pr_info("%s class_code: %02x%02x\n", pfx,
+ class_code[1], class_code[0]);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER) {
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ pr_info("%s lower_dw: 0x%08x, upper_dw: 0x%08x\n", pfx,
+ prot_err->dev_serial_num.lower_dw,
+ prot_err->dev_serial_num.upper_dw);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_CAPABILITY) {
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ prot_err->capability,
+ sizeof(prot_err->capability), 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_DVSEC) {
+ pr_info("%s DVSEC length: 0x%04x\n", pfx, prot_err->dvsec_len);
+
+ pr_info("%s CXL DVSEC:\n", pfx);
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, (prot_err + 1),
+ prot_err->dvsec_len, 0);
+ }
+
+ if (prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG) {
+ size_t size = sizeof(*prot_err) + prot_err->dvsec_len;
+ struct cxl_ras_capability_regs *cxl_ras;
+
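+ /*
+ * Per the section layout, the DVSEC data immediately follows the
+ * fixed-size header, and the CXL RAS capability structure (the
+ * error log) follows the DVSEC data.
+ */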
+ pr_info("%s Error log length: 0x%04x\n", pfx, prot_err->err_len);
+
+ pr_info("%s CXL Error Log:\n", pfx);
+ cxl_ras = (struct cxl_ras_capability_regs *)((long)prot_err + size);
+ pr_info("%s cxl_ras_uncor_status: 0x%08x", pfx,
+ cxl_ras->uncor_status);
+ pr_info("%s cxl_ras_uncor_mask: 0x%08x\n", pfx,
+ cxl_ras->uncor_mask);
+ pr_info("%s cxl_ras_uncor_severity: 0x%08x\n", pfx,
+ cxl_ras->uncor_severity);
+ pr_info("%s cxl_ras_cor_status: 0x%08x", pfx,
+ cxl_ras->cor_status);
+ pr_info("%s cxl_ras_cor_mask: 0x%08x\n", pfx,
+ cxl_ras->cor_mask);
+ pr_info("%s cap_control: 0x%08x\n", pfx,
+ cxl_ras->cap_control);
+ pr_info("%s Header Log Registers:\n", pfx);
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, cxl_ras->header_log,
+ sizeof(cxl_ras->header_log), 0);
+ }
+}
diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
new file mode 100644
index 0000000000..86bfcf7909
--- /dev/null
+++ b/drivers/firmware/efi/cper_cxl.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UEFI Common Platform Error Record (CPER) support for CXL Section.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
+ */
+
+#ifndef LINUX_CPER_CXL_H
+#define LINUX_CPER_CXL_H
+
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
+
+#pragma pack(1)
+
+/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+struct cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+
+ /*
+ * Except for RCH Downstream Port, all the remaining CXL Agent
+ * types are uniquely identified by the PCIe compatible SBDF number.
+ */
+ union {
+ u64 rcrb_base_addr;
+ struct {
+ u8 function;
+ u8 device;
+ u8 bus;
+ u16 segment;
+ u8 reserved_1[3];
+ };
+ } agent_addr;
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u8 class_code[2];
+ u16 slot;
+ u8 reserved_1[4];
+ } device_id;
+
+ struct {
+ u32 lower_dw;
+ u32 upper_dw;
+ } dev_serial_num;
+
+ u8 capability[60];
+ u16 dvsec_len;
+ u16 err_len;
+ u8 reserved_2[4];
+};
+
+#pragma pack()
+
+void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
+
+#endif /* LINUX_CPER_CXL_H */
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 0000000000..f80d87c199
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+static long __init parse_acpi_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ struct acpi_device *adev;
+ struct device *phys_dev;
+ char hid[ACPI_ID_LEN];
+ u64 uid;
+ int ret;
+
+ if (node->header.length != 12)
+ return -EINVAL;
+
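+ /*
+ * The HID is stored as a compressed EISA ID: three 5-bit letters in
+ * the low 16 bits and a 16-bit hex product ID in the high 16 bits.
+ * For instance, 0x0A0341D0 decodes to "PNP0A03".
+ */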
+ sprintf(hid, "%c%c%c%04X",
+ 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
+ node->acpi.hid >> 16);
+
+ for_each_acpi_dev_match(adev, hid, NULL, -1) {
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret == 0 && node->acpi.uid == uid)
+ break;
+ if (ret == -ENODATA && node->acpi.uid == 0)
+ break;
+ }
+ if (!adev)
+ return -ENODEV;
+
+ phys_dev = acpi_get_first_physical_node(adev);
+ if (phys_dev) {
+ *child = get_device(phys_dev);
+ acpi_dev_put(adev);
+ } else
+ *child = &adev->dev;
+
+ return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+ unsigned int devfn = *(unsigned int *)data;
+
+ return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ unsigned int devfn;
+
+ if (node->header.length != 6)
+ return -EINVAL;
+ if (!parent)
+ return -EINVAL;
+
+ devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+ *child = device_find_child(parent, &devfn, match_pci_dev);
+ if (!*child)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
+ * parse_end_path() is supposed to return these.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
+
+static long __init parse_end_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ if (node->header.length != 4)
+ return -EINVAL;
+ if (node->header.sub_type != EFI_DEV_END_INSTANCE &&
+ node->header.sub_type != EFI_DEV_END_ENTIRE)
+ return -EINVAL;
+ if (!parent)
+ return -ENODEV;
+
+ *child = get_device(parent);
+ return node->header.sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device. If the device was found, its reference count is incremented and a
+ * pointer to it is returned. The caller needs to drop the reference with
+ * put_device() after use. The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed. Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL. Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ * // do something with dev
+ * put_device(dev);
+ * }
+ * if (IS_ERR(dev))
+ * // report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ * %ERR_PTR(-ENODEV) if no device was found,
+ * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len)
+{
+ struct device *parent = NULL, *child;
+ long ret = 0;
+
+ if (!*len)
+ return NULL;
+
+ while (!ret) {
+ if (*len < 4 || *len < (*node)->header.length)
+ ret = -EINVAL;
+ else if ((*node)->header.type == EFI_DEV_ACPI &&
+ (*node)->header.sub_type == EFI_DEV_BASIC_ACPI)
+ ret = parse_acpi_path(*node, parent, &child);
+ else if ((*node)->header.type == EFI_DEV_HW &&
+ (*node)->header.sub_type == EFI_DEV_PCI)
+ ret = parse_pci_path(*node, parent, &child);
+ else if (((*node)->header.type == EFI_DEV_END_PATH ||
+ (*node)->header.type == EFI_DEV_END_PATH2))
+ ret = parse_end_path(*node, parent, &child);
+ else
+ ret = -ENOTSUPP;
+
+ put_device(parent);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ parent = child;
+ *node = (void *)*node + (*node)->header.length;
+ *len -= (*node)->header.length;
+ }
+
+ if (ret == EFI_DEV_END_ENTIRE)
+ *len = 0;
+
+ return child;
+}
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
new file mode 100644
index 0000000000..f80a9af3d1
--- /dev/null
+++ b/drivers/firmware/efi/earlycon.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+
+#include <asm/early_ioremap.h>
+
+static const struct console *earlycon_console __initdata;
+static const struct font_desc *font;
+static u16 cur_line_y, max_line_y;
+static u32 efi_x_array[1024];
+static u32 efi_x, efi_y;
+static u64 fb_base;
+static bool fb_wb;
+static void *efi_fb;
+
+/*
+ * EFI earlycon needs to use early_memremap() to map the framebuffer.
+ * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon';
+ * memremap() should be used instead. memremap() becomes available after
+ * paging_init(), which runs earlier than the initcall callbacks. Thus this
+ * early initcall, efi_earlycon_remap_fb(), maps the whole EFI framebuffer.
+ */
+static int __init efi_earlycon_remap_fb(void)
+{
+ /* bail if there is no bootconsole or it was unregistered already */
+ if (!earlycon_console || !console_is_registered(earlycon_console))
+ return 0;
+
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
+
+ return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(efi_earlycon_remap_fb);
+
+static int __init efi_earlycon_unmap_fb(void)
+{
+ /* unmap the bootconsole fb unless keep_bootcon left it registered */
+ if (efi_fb && !console_is_registered(earlycon_console))
+ memunmap(efi_fb);
+ return 0;
+}
+late_initcall(efi_earlycon_unmap_fb);
+
+static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
+{
+ pgprot_t fb_prot;
+
+ if (efi_fb)
+ return efi_fb + start;
+
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
+ return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
+}
+
+static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
+{
+ if (efi_fb)
+ return;
+
+ early_memunmap(addr, len);
+}
+
+static void efi_earlycon_clear_scanline(unsigned int y)
+{
+ unsigned long *dst;
+ u16 len;
+
+ len = screen_info.lfb_linelength;
+ dst = efi_earlycon_map(y*len, len);
+ if (!dst)
+ return;
+
+ memset(dst, 0, len);
+ efi_earlycon_unmap(dst, len);
+}
+
+static void efi_earlycon_scroll_up(void)
+{
+ unsigned long *dst, *src;
+ u16 maxlen = 0;
+ u16 len;
+ u32 i, height;
+
+ /* Find the cached maximum x coordinate */
+ for (i = 0; i < max_line_y; i++) {
+ if (efi_x_array[i] > maxlen)
+ maxlen = efi_x_array[i];
+ }
+ maxlen *= 4;
+
+ len = screen_info.lfb_linelength;
+ height = screen_info.lfb_height;
+
+ for (i = 0; i < height - font->height; i++) {
+ dst = efi_earlycon_map(i*len, len);
+ if (!dst)
+ return;
+
+ src = efi_earlycon_map((i + font->height) * len, len);
+ if (!src) {
+ efi_earlycon_unmap(dst, len);
+ return;
+ }
+
+ memmove(dst, src, maxlen);
+
+ efi_earlycon_unmap(src, len);
+ efi_earlycon_unmap(dst, len);
+ }
+}
+
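+/*
+ * Render one scanline of a 1 bit-per-pixel font glyph as 32-bit pixels:
+ * set bits become white, clear bits black.
+ */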
+static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+ const u32 color_black = 0x00000000;
+ const u32 color_white = 0x00ffffff;
+ const u8 *src;
+ int m, n, bytes;
+ u8 x;
+
+ bytes = BITS_TO_BYTES(font->width);
+ src = font->data + c * font->height * bytes + h * bytes;
+
+ for (m = 0; m < font->width; m++) {
+ n = m % 8;
+ x = *(src + m / 8);
+ if ((x >> (7 - n)) & 1)
+ *dst = color_white;
+ else
+ *dst = color_black;
+ dst++;
+ }
+}
+
+static void
+efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+{
+ struct screen_info *si;
+ u32 cur_efi_x = efi_x;
+ unsigned int len;
+ const char *s;
+ void *dst;
+
+ si = &screen_info;
+ len = si->lfb_linelength;
+
+ while (num) {
+ unsigned int linemax = (si->lfb_width - efi_x) / font->width;
+ unsigned int h, count;
+
+ count = strnchrnul(str, num, '\n') - str;
+ if (count > linemax)
+ count = linemax;
+
+ for (h = 0; h < font->height; h++) {
+ unsigned int n, x;
+
+ dst = efi_earlycon_map((efi_y + h) * len, len);
+ if (!dst)
+ return;
+
+ s = str;
+ n = count;
+ x = efi_x;
+
+ while (n-- > 0) {
+ efi_earlycon_write_char(dst + x*4, *s, h);
+ x += font->width;
+ s++;
+ }
+
+ efi_earlycon_unmap(dst, len);
+ }
+
+ num -= count;
+ efi_x += count * font->width;
+ str += count;
+
+ if (num > 0 && *s == '\n') {
+ cur_efi_x = efi_x;
+ efi_x = 0;
+ efi_y += font->height;
+ str++;
+ num--;
+ }
+
+ if (efi_x + font->width > si->lfb_width) {
+ cur_efi_x = efi_x;
+ efi_x = 0;
+ efi_y += font->height;
+ }
+
+ if (efi_y + font->height > si->lfb_height) {
+ u32 i;
+
+ efi_x_array[cur_line_y] = cur_efi_x;
+ cur_line_y = (cur_line_y + 1) % max_line_y;
+
+ efi_y -= font->height;
+ efi_earlycon_scroll_up();
+
+ for (i = 0; i < font->height; i++)
+ efi_earlycon_clear_scanline(efi_y + i);
+ }
+ }
+}
+
+static bool __initdata fb_probed;
+
+void __init efi_earlycon_reprobe(void)
+{
+ if (fb_probed)
+ setup_earlycon("efifb");
+}
+
+static int __init efi_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ struct screen_info *si;
+ u16 xres, yres;
+ u32 i;
+
+ fb_wb = opt && !strcmp(opt, "ram");
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
+ fb_probed = true;
+ return -ENODEV;
+ }
+
+ fb_base = screen_info.lfb_base;
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)screen_info.ext_lfb_base << 32;
+
+ si = &screen_info;
+ xres = si->lfb_width;
+ yres = si->lfb_height;
+
+ /*
+ * efi_earlycon_write_char() implicitly assumes a framebuffer with
+ * 32 bits per pixel.
+ */
+ if (si->lfb_depth != 32)
+ return -ENODEV;
+
+ font = get_default_font(xres, yres, -1, -1);
+ if (!font)
+ return -ENODEV;
+
+ /* Fill the cache with maximum possible value of x coordinate */
+ memset32(efi_x_array, rounddown(xres, font->width), ARRAY_SIZE(efi_x_array));
+ efi_y = rounddown(yres, font->height);
+
+ /* Make sure we have cache for the x coordinate for the full screen */
+ max_line_y = efi_y / font->height + 1;
+ cur_line_y = 0;
+
+ efi_y -= font->height;
+ for (i = 0; i < (yres - efi_y) / font->height; i++)
+ efi_earlycon_scroll_up();
+
+ device->con->write = efi_earlycon_write;
+ earlycon_console = device->con;
+ return 0;
+}
+EARLYCON_DECLARE(efifb, efi_earlycon_setup);
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
new file mode 100644
index 0000000000..6aafdb67db
--- /dev/null
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2012 Intel Corporation
+ * Author: Josh Triplett <josh@joshtriplett.org>
+ *
+ * Based on the bgrt driver:
+ * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
+ * Author: Matthew Garrett
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+
+struct acpi_table_bgrt bgrt_tab;
+size_t bgrt_image_size;
+
+struct bmp_header {
+ u16 id;
+ u32 size;
+} __packed;
+
+void __init efi_bgrt_init(struct acpi_table_header *table)
+{
+ void *image;
+ struct bmp_header bmp_header;
+ struct acpi_table_bgrt *bgrt = &bgrt_tab;
+
+ if (acpi_disabled)
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (table->length < sizeof(bgrt_tab)) {
+ pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
+ table->length, sizeof(bgrt_tab));
+ return;
+ }
+ *bgrt = *(struct acpi_table_bgrt *)table;
+ /*
+ * Only version 1 is defined, but some older laptops (seen on Lenovo
+ * Ivy Bridge models) have a correct version 1 BGRT table with the
+ * version set to 0, so we accept version 0 and 1.
+ */
+ if (bgrt->version > 1) {
+ pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
+ bgrt->version);
+ goto out;
+ }
+ if (bgrt->image_type != 0) {
+ pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
+ bgrt->image_type);
+ goto out;
+ }
+ if (!bgrt->image_address) {
+ pr_notice("Ignoring BGRT: null image address\n");
+ goto out;
+ }
+
+ if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) {
+ pr_notice("Ignoring BGRT: invalid image address\n");
+ goto out;
+ }
+ image = early_memremap(bgrt->image_address, sizeof(bmp_header));
+ if (!image) {
+ pr_notice("Ignoring BGRT: failed to map image header memory\n");
+ goto out;
+ }
+
+ memcpy(&bmp_header, image, sizeof(bmp_header));
+ early_memunmap(image, sizeof(bmp_header));
+ if (bmp_header.id != 0x4d42) {
+ pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+ bmp_header.id);
+ goto out;
+ }
+ bgrt_image_size = bmp_header.size;
+ efi_mem_reserve(bgrt->image_address, bgrt_image_size);
+
+ return;
+out:
+ memset(bgrt, 0, sizeof(bgrt_tab));
+}
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
new file mode 100644
index 0000000000..ef0820f1a9
--- /dev/null
+++ b/drivers/firmware/efi/efi-init.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/efi.h>
+#include <linux/fwnode.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <asm/efi.h>
+
+unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static int __init is_memory(efi_memory_desc_t *md)
+{
+ if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))
+ return 1;
+ return 0;
+}
+
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t __init efi_to_phys(unsigned long addr)
+{
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ /* no virtual mapping has been installed by the stub */
+ break;
+ if (md->virt_addr <= addr &&
+ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+ return md->phys_addr + addr - md->virt_addr;
+ }
+ return addr;
+}
+
+extern __weak const efi_config_table_type_t efi_arch_tables[];
+
+static void __init init_screen_info(void)
+{
+ struct screen_info *si;
+
+ if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ si = early_memremap(screen_info_table, sizeof(*si));
+ if (!si) {
+ pr_err("Could not map screen_info config table\n");
+ return;
+ }
+ screen_info = *si;
+ memset(si, 0, sizeof(*si));
+ early_memunmap(si, sizeof(*si));
+
+ if (memblock_is_map_memory(screen_info.lfb_base))
+ memblock_mark_nomap(screen_info.lfb_base,
+ screen_info.lfb_size);
+
+ if (IS_ENABLED(CONFIG_EFI_EARLYCON))
+ efi_earlycon_reprobe();
+ }
+}
+
+static int __init uefi_init(u64 efi_system_table)
+{
+ efi_config_table_t *config_tables;
+ efi_system_table_t *systab;
+ size_t table_size;
+ int retval;
+
+ systab = early_memremap_ro(efi_system_table, sizeof(efi_system_table_t));
+ if (systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_bit(EFI_64BIT, &efi.flags);
+
+ retval = efi_systab_check_header(&systab->hdr);
+ if (retval)
+ goto out;
+
+ efi.runtime = systab->runtime;
+ efi.runtime_version = systab->hdr.revision;
+
+ efi_systab_report_header(&systab->hdr, efi_to_phys(systab->fw_vendor));
+
+ table_size = sizeof(efi_config_table_t) * systab->nr_tables;
+ config_tables = early_memremap_ro(efi_to_phys(systab->tables),
+ table_size);
+ if (config_tables == NULL) {
+ pr_warn("Unable to map EFI config table array.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ retval = efi_config_parse_tables(config_tables, systab->nr_tables,
+ efi_arch_tables);
+
+ early_memunmap(config_tables, table_size);
+out:
+ early_memunmap(systab, sizeof(efi_system_table_t));
+ return retval;
+}
+
+/*
+ * Return true for regions that can be used as System RAM.
+ */
+static __init int is_usable_memory(efi_memory_desc_t *md)
+{
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ /*
+ * Special purpose memory is 'soft reserved', which means it
+ * is set aside initially, but can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
+ * According to the spec, these regions are no longer reserved
+ * after calling ExitBootServices(). However, we can only use
+ * them as System RAM if they can be mapped writeback cacheable.
+ */
+ return (md->attribute & EFI_MEMORY_WB);
+ default:
+ break;
+ }
+ return false;
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI memory map:\n");
+
+ /*
+ * Discard memblocks discovered so far: if there are any at this
+ * point, they originate from memory nodes in the DT, and UEFI
+ * uses its own memory map instead.
+ */
+ memblock_dump_all();
+ memblock_remove(0, PHYS_ADDR_MAX);
+
+ for_each_efi_memory_desc(md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (efi_enabled(EFI_DBG)) {
+ char buf[64];
+
+ pr_info(" 0x%012llx-0x%012llx %s\n",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), md));
+ }
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_memory(md)) {
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (!is_usable_memory(md))
+ memblock_mark_nomap(paddr, size);
+
+ /* keep ACPI reclaim memory intact for kexec etc. */
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+ memblock_reserve(paddr, size);
+ }
+ }
+}
+
+void __init efi_init(void)
+{
+ struct efi_memory_map_data data;
+ u64 efi_system_table;
+
+ /* Grab UEFI information placed in FDT by stub */
+ efi_system_table = efi_get_fdt_params(&data);
+ if (!efi_system_table)
+ return;
+
+ if (efi_memmap_init_early(&data) < 0) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
+ * description of memory we have, so there is little point in
+ * proceeding if we cannot access it.
+ */
+ panic("Unable to map EFI memory map.\n");
+ }
+
+ WARN(efi.memmap.desc_version != 1,
+ "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+ efi.memmap.desc_version);
+
+ if (uefi_init(efi_system_table) < 0) {
+ efi_memmap_unmap();
+ return;
+ }
+
+ reserve_regions();
+ /*
+ * For memblock manipulation, the cap should come after the memblock_add().
+ * And now, memblock is fully populated, it is time to do capping.
+ */
+ early_init_dt_check_for_usable_mem_range();
+ efi_find_mirror();
+ efi_esrt_init();
+ efi_mokvar_table_init();
+
+ memblock_reserve(data.phys_map & PAGE_MASK,
+ PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
+
+ init_screen_info();
+}
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
new file mode 100644
index 0000000000..e7b9ec6f8a
--- /dev/null
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+MODULE_IMPORT_NS(EFIVAR);
+
+#define DUMP_NAME_LEN 66
+
+static unsigned int record_size = 1024;
+module_param(record_size, uint, 0444);
+MODULE_PARM_DESC(record_size, "size of each pstore UEFI var (in bytes, min/default=1024)");
+
+static bool efivars_pstore_disable =
+ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ int err;
+
+ err = efivar_lock();
+ if (err)
+ return err;
+
+ psi->data = kzalloc(record_size, GFP_KERNEL);
+ if (!psi->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ efivar_unlock();
+ kfree(psi->data);
+ return 0;
+}
+
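+/*
+ * Synthesize a pstore record id from the timestamp, dump part number and
+ * count. For instance (hypothetical values), timestamp 1700000000, part 1
+ * and count 2 yield id 170000000001002.
+ */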
+static inline u64 generic_id(u64 timestamp, unsigned int part, int count)
+{
+ return (timestamp * 100 + part) * 1000 + count;
+}
+
+static int efi_pstore_read_func(struct pstore_record *record,
+ efi_char16_t *varname)
+{
+ unsigned long wlen, size = record_size;
+ char name[DUMP_NAME_LEN], data_type;
+ efi_status_t status;
+ int cnt;
+ unsigned int part;
+ u64 time;
+
+ ucs2_as_utf8(name, varname, DUMP_NAME_LEN);
+
+ if (sscanf(name, "dump-type%u-%u-%d-%llu-%c",
+ &record->type, &part, &cnt, &time, &data_type) == 5) {
+ record->id = generic_id(time, part, cnt);
+ record->part = part;
+ record->count = cnt;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ if (data_type == 'C')
+ record->compressed = true;
+ else
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else if (sscanf(name, "dump-type%u-%u-%d-%llu",
+ &record->type, &part, &cnt, &time) == 4) {
+ record->id = generic_id(time, part, cnt);
+ record->part = part;
+ record->count = cnt;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else if (sscanf(name, "dump-type%u-%u-%llu",
+ &record->type, &part, &time) == 3) {
+ /*
+ * An old-format variable, which doesn't support
+ * holding multiple logs, may remain in the
+ * variable store.
+ */
+ record->id = generic_id(time, part, 0);
+ record->part = part;
+ record->count = 0;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else
+ return 0;
+
+ record->buf = kmalloc(size, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
+
+ status = efivar_get_variable(varname, &LINUX_EFI_CRASH_GUID, NULL,
+ &size, record->buf);
+ if (status != EFI_SUCCESS) {
+ kfree(record->buf);
+ return -EIO;
+ }
+
+ /*
+ * Store the name of the variable in the pstore_record priv field, so
+ * we can reuse it later if we need to delete the EFI variable from the
+ * variable store.
+ */
+ wlen = (ucs2_strnlen(varname, DUMP_NAME_LEN) + 1) * sizeof(efi_char16_t);
+ record->priv = kmemdup(varname, wlen, GFP_KERNEL);
+ if (!record->priv) {
+ kfree(record->buf);
+ return -ENOMEM;
+ }
+
+ return size;
+}
+
+static ssize_t efi_pstore_read(struct pstore_record *record)
+{
+ efi_char16_t *varname = record->psi->data;
+ efi_guid_t guid = LINUX_EFI_CRASH_GUID;
+ unsigned long varname_size;
+ efi_status_t status;
+
+ for (;;) {
+ varname_size = 1024;
+
+ /*
+ * If this is the first read() call in the pstore enumeration,
+ * varname will be the empty string, and the GetNextVariable()
+ * runtime service call will return the first EFI variable in
+ * its own enumeration order, ignoring the guid argument.
+ *
+ * Subsequent calls to GetNextVariable() must pass the name and
+ * guid values returned by the previous call, which is why we
+ * store varname in record->psi->data. Given that we only
+ * enumerate variables with the efi-pstore GUID, there is no
+ * need to record the guid return value.
+ */
+ status = efivar_get_next_variable(&varname_size, varname, &guid);
+ if (status == EFI_NOT_FOUND)
+ return 0;
+
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+ /* skip variables that don't concern us */
+ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID))
+ continue;
+
+ return efi_pstore_read_func(record, varname);
+ }
+}
+
+static int efi_pstore_write(struct pstore_record *record)
+{
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_status_t status;
+ int i;
+
+ record->id = generic_id(record->time.tv_sec, record->part,
+ record->count);
+
+ /* Since we copy the entire length of name, make sure it is wiped. */
+ memset(name, 0, sizeof(name));
+
+ snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld-%c",
+ record->type, record->part, record->count,
+ (long long)record->time.tv_sec,
+ record->compressed ? 'C' : 'D');
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ if (efivar_trylock())
+ return -EBUSY;
+ status = efivar_set_variable_locked(efi_name, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES,
+ record->size, record->psi->buf,
+ true);
+ efivar_unlock();
+ return status == EFI_SUCCESS ? 0 : -EIO;
+};
+
+static int efi_pstore_erase(struct pstore_record *record)
+{
+ efi_status_t status;
+
+ status = efivar_set_variable(record->priv, &LINUX_EFI_CRASH_GUID,
+ PSTORE_EFI_ATTRIBUTES, 0, NULL);
+
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ return -EIO;
+ return 0;
+}
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .flags = PSTORE_FLAGS_DMESG,
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
+
+static __init int efivars_pstore_init(void)
+{
+ if (!efivar_supports_writes())
+ return 0;
+
+ if (efivars_pstore_disable)
+ return 0;
+
+ /*
+ * Notice that 1024 is the minimum here to prevent issues with
+ * decompression algorithms that were spotted during tests;
+ * even in the case of not using compression, smaller values would
+ * just pollute more the pstore FS with many small collected files.
+ */
+ if (record_size < 1024)
+ record_size = 1024;
+
+ efi_pstore_info.buf = kmalloc(record_size, GFP_KERNEL);
+ if (!efi_pstore_info.buf)
+ return -ENOMEM;
+
+ efi_pstore_info.bufsize = record_size;
+
+ if (pstore_register(&efi_pstore_info)) {
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
+ }
+
+ return 0;
+}
+
+static __exit void efivars_pstore_exit(void)
+{
+ if (!efi_pstore_info.bufsize)
+ return;
+
+ pstore_unregister(&efi_pstore_info);
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
+}
+
+module_init(efivars_pstore_init);
+module_exit(efivars_pstore_exit);
+
+MODULE_DESCRIPTION("EFI variable backend for pstore");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:efivars");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
new file mode 100644
index 0000000000..1974f0ad32
--- /dev/null
+++ b/drivers/firmware/efi/efi.c
@@ -0,0 +1,1182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * efi.c - EFI subsystem
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
+ *
+ * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
+ * allowing the efivarfs to be mounted or the efivars module to be loaded.
+ * The existence of /sys/firmware/efi may also be used by userspace to
+ * determine that the system supports EFI.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/of.h>
+#include <linux/initrd.h>
+#include <linux/io.h>
+#include <linux/kexec.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ucs2_string.h>
+#include <linux/memblock.h>
+#include <linux/security.h>
+
+#include <asm/early_ioremap.h>
+
+struct efi __read_mostly efi = {
+ .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .smbios3 = EFI_INVALID_TABLE_ADDR,
+ .esrt = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR,
+ .tpm_final_log = EFI_INVALID_TABLE_ADDR,
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ .mokvar_table = EFI_INVALID_TABLE_ADDR,
+#endif
+#ifdef CONFIG_EFI_COCO_SECRET
+ .coco_secret = EFI_INVALID_TABLE_ADDR,
+#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ .unaccepted = EFI_INVALID_TABLE_ADDR,
+#endif
+};
+EXPORT_SYMBOL(efi);
+
+unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
+
+extern unsigned long screen_info_table;
+
+struct mm_struct efi_mm = {
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
+ MMAP_LOCK_INITIALIZER(efi_mm)
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+};
+
+struct workqueue_struct *efi_rts_wq;
+
+static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
+static int __init setup_noefi(char *arg)
+{
+ disable_runtime = true;
+ return 0;
+}
+early_param("noefi", setup_noefi);
+
+bool efi_runtime_disabled(void)
+{
+ return disable_runtime;
+}
+
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
+}
+
+static int __init parse_efi_cmdline(char *str)
+{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
+ if (parse_option_str(str, "debug"))
+ set_bit(EFI_DBG, &efi.flags);
+
+ if (parse_option_str(str, "noruntime"))
+ disable_runtime = true;
+
+ if (parse_option_str(str, "runtime"))
+ disable_runtime = false;
+
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
+ return 0;
+}
+early_param("efi", parse_efi_cmdline);
+
+struct kobject *efi_kobj;
+
+/*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver.
+ * Note: do not add more fields to the systab sysfs file, as that breaks
+ * the sysfs one-value-per-file rule!
+ */
+static ssize_t systab_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *str = buf;
+
+ if (!kobj || !buf)
+ return -EINVAL;
+
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
+ /*
+ * If both SMBIOS and SMBIOS3 entry points are implemented, the
+ * SMBIOS3 entry point shall be preferred, so we list it first to
+ * let applications stop parsing after the first match.
+ */
+ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
+ if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+
+ if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
+ str = efi_systab_show_arch(str);
+
+ return str - buf;
+}
+
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
+
+static ssize_t fw_platform_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
+}
+
+extern __weak struct kobj_attribute efi_attr_fw_vendor;
+extern __weak struct kobj_attribute efi_attr_runtime;
+extern __weak struct kobj_attribute efi_attr_config_table;
+static struct kobj_attribute efi_attr_fw_platform_size =
+ __ATTR_RO(fw_platform_size);
+
+static struct attribute *efi_subsys_attrs[] = {
+ &efi_attr_systab.attr,
+ &efi_attr_fw_platform_size.attr,
+ &efi_attr_fw_vendor.attr,
+ &efi_attr_runtime.attr,
+ &efi_attr_config_table.attr,
+ NULL,
+};
+
+umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ return attr->mode;
+}
+
+static const struct attribute_group efi_subsys_attr_group = {
+ .attrs = efi_subsys_attrs,
+ .is_visible = efi_attr_is_visible,
+};
+
+static struct efivars generic_efivars;
+static struct efivar_operations generic_ops;
+
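+/*
+ * Probe GetNextVariableName once to check whether the firmware actually
+ * implements the variable services: even when they are advertised as
+ * supported, a call may still return EFI_UNSUPPORTED at runtime.
+ */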
+static bool generic_ops_supported(void)
+{
+ unsigned long name_size;
+ efi_status_t status;
+ efi_char16_t name;
+ efi_guid_t guid;
+
+ name_size = sizeof(name);
+
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+
+ return true;
+}
+
+static int generic_ops_register(void)
+{
+ if (!generic_ops_supported())
+ return 0;
+
+ generic_ops.get_variable = efi.get_variable;
+ generic_ops.get_next_variable = efi.get_next_variable;
+ generic_ops.query_variable_store = efi_query_variable_store;
+ generic_ops.query_variable_info = efi.query_variable_info;
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
+ generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ }
+ return efivars_register(&generic_efivars, &generic_ops);
+}
+
+static void generic_ops_unregister(void)
+{
+ if (!generic_ops.get_variable)
+ return;
+
+ efivars_unregister(&generic_efivars);
+}
+
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
+#define EFIVAR_SSDT_NAME_MAX 16UL
+static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+static int __init efivar_ssdt_setup(char *str)
+{
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;
+
+ if (strlen(str) < sizeof(efivar_ssdt))
+ memcpy(efivar_ssdt, str, strlen(str));
+ else
+ pr_warn("efivar_ssdt: name too long: %s\n", str);
+ return 1;
+}
+__setup("efivar_ssdt=", efivar_ssdt_setup);
+
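+/*
+ * Scan all EFI variables for one whose name matches the efivar_ssdt=
+ * parameter and load its payload as an ACPI SSDT override. For example,
+ * booting with efivar_ssdt=OemSsdt (a hypothetical variable name) would load
+ * the table stored in the OemSsdt-<vendor guid> variable.
+ */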
+static __init int efivar_ssdt_load(void)
+{
+ unsigned long name_size = 256;
+ efi_char16_t *name = NULL;
+ efi_status_t status;
+ efi_guid_t guid;
+
+ if (!efivar_ssdt[0])
+ return 0;
+
+ name = kzalloc(name_size, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ for (;;) {
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ unsigned long data_size = 0;
+ void *data;
+ int limit;
+
+ status = efi.get_next_variable(&name_size, name, &guid);
+ if (status == EFI_NOT_FOUND) {
+ break;
+ } else if (status == EFI_BUFFER_TOO_SMALL) {
+ efi_char16_t *name_tmp =
+ krealloc(name, name_size, GFP_KERNEL);
+ if (!name_tmp) {
+ kfree(name);
+ return -ENOMEM;
+ }
+ name = name_tmp;
+ continue;
+ }
+
+ limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ continue;
+
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
+
+ status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+ return -EIO;
+
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ status = efi.get_variable(name, &guid, NULL, &data_size, data);
+ if (status == EFI_SUCCESS) {
+ acpi_status ret = acpi_load_table(data, NULL);
+ if (ret)
+ pr_err("failed to load table: %u\n", ret);
+ else
+ continue;
+ } else {
+ pr_err("failed to get var data: 0x%lx\n", status);
+ }
+ kfree(data);
+ }
+ return 0;
+}
+#else
+static inline int efivar_ssdt_load(void) { return 0; }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+#define EFI_DEBUGFS_MAX_BLOBS 32
+
+static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
+
+static void __init efi_debugfs_init(void)
+{
+ struct dentry *efi_debugfs;
+ efi_memory_desc_t *md;
+ char name[32];
+ int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
+ int i = 0;
+
+ efi_debugfs = debugfs_create_dir("efi", NULL);
+ if (IS_ERR_OR_NULL(efi_debugfs))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ switch (md->type) {
+ case EFI_BOOT_SERVICES_CODE:
+ snprintf(name, sizeof(name), "boot_services_code%d",
+ type_count[md->type]++);
+ break;
+ case EFI_BOOT_SERVICES_DATA:
+ snprintf(name, sizeof(name), "boot_services_data%d",
+ type_count[md->type]++);
+ break;
+ default:
+ continue;
+ }
+
+ if (i >= EFI_DEBUGFS_MAX_BLOBS) {
+ pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
+ EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
+ break;
+ }
+
+ debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
+ debugfs_blob[i].data = memremap(md->phys_addr,
+ debugfs_blob[i].size,
+ MEMREMAP_WB);
+ if (!debugfs_blob[i].data)
+ continue;
+
+ debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
+ i++;
+ }
+}
+#else
+static inline void efi_debugfs_init(void) {}
+#endif
+
+/*
+ * We register the efi subsystem with the firmware subsystem and the
+ * efivars subsystem with the efi subsystem, if the system was booted with
+ * EFI.
+ */
+static int __init efisubsys_init(void)
+{
+ int error;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.runtime_supported_mask = 0;
+
+ if (!efi_enabled(EFI_BOOT))
+ return 0;
+
+ if (efi.runtime_supported_mask) {
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ efi.runtime_supported_mask = 0;
+ return 0;
+ }
+ }
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
+ platform_device_register_simple("rtc-efi", 0, NULL, 0);
+
+ /* We register the efi directory at /sys/firmware/efi */
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
+ error = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
+ error = generic_ops_register();
+ if (error)
+ goto err_put;
+ efivar_ssdt_load();
+ platform_device_register_simple("efivars", 0, NULL, 0);
+ }
+
+ error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
+ if (error) {
+ pr_err("efi: Sysfs attribute export failed with error %d.\n",
+ error);
+ goto err_unregister;
+ }
+
+ /* and the standard mountpoint for efivarfs */
+ error = sysfs_create_mount_point(efi_kobj, "efivars");
+ if (error) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ goto err_remove_group;
+ }
+
+ if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
+ efi_debugfs_init();
+
+#ifdef CONFIG_EFI_COCO_SECRET
+ if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
+ platform_device_register_simple("efi_secret", 0, NULL, 0);
+#endif
+
+ return 0;
+
+err_remove_group:
+ sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
+err_unregister:
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
+ generic_ops_unregister();
+err_put:
+ kobject_put(efi_kobj);
+ efi_kobj = NULL;
+err_destroy_wq:
+ if (efi_rts_wq)
+ destroy_workqueue(efi_rts_wq);
+
+ return error;
+}
+
+subsys_initcall(efisubsys_init);
+
+void __init efi_find_mirror(void)
+{
+ efi_memory_desc_t *md;
+ u64 mirror_size = 0, total_size = 0;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ total_size += size;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+ memblock_mark_mirror(start, size);
+ mirror_size += size;
+ }
+ }
+ if (mirror_size)
+ pr_info("Memory: %lldM/%lldM mirrored memory\n",
+ mirror_size>>20, total_size>>20);
+}
+
+/*
+ * Find the efi memory descriptor for a given physical address. Given a
+ * physical address, determine if it exists within an EFI Memory Map entry,
+ * and if so, populate the supplied memory descriptor with the appropriate
+ * data.
+ */
+int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP)) {
+ pr_err_once("EFI_MEMMAP is not enabled.\n");
+ return -EINVAL;
+ }
+
+ if (!out_md) {
+ pr_err_once("out_md is null.\n");
+ return -EINVAL;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 size;
+ u64 end;
+
+ /* skip bogus entries (including empty ones) */
+ if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
+ (md->num_pages <= 0) ||
+ (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
+ continue;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+ if (phys_addr >= md->phys_addr && phys_addr < end) {
+ memcpy(out_md, md, sizeof(*out_md));
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ __weak __alias(__efi_mem_desc_lookup);
+
+/*
+ * Calculate the highest address of an efi memory descriptor.
+ */
+u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
+{
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 end = md->phys_addr + size;
+ return end;
+}
+
+void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
+
+/**
+ * efi_mem_reserve - Reserve an EFI memory region
+ * @addr: Physical address to reserve
+ * @size: Size of reservation
+ *
+ * Mark a region as reserved from general kernel allocation and
+ * prevent it from being released by efi_free_boot_services().
+ *
+ * This function should be called by drivers once they've parsed EFI
+ * configuration tables to figure out where their data lives, e.g.
+ * efi_esrt_init().
+ */
+void __init efi_mem_reserve(phys_addr_t addr, u64 size)
+{
+ /* efi_mem_reserve() does not work under Xen */
+ if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
+ return;
+
+ if (!memblock_is_region_reserved(addr, size))
+ memblock_reserve(addr, size);
+
+ /*
+ * Some architectures (x86) reserve all boot services ranges
+ * until efi_free_boot_services() because of buggy firmware
+ * implementations. This means the above memblock_reserve() is
+ * superfluous on x86; what this call needs to do instead is
+ * ensure that the @addr, @size range is not freed.
+ */
+ efi_arch_mem_reserve(addr, size);
+}
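+/*
+ * Example of the pattern described in the kerneldoc above: efi_esrt_init()
+ * looks up the ESRT address from its config table entry and, when the table
+ * lives in EFI_BOOT_SERVICES_DATA, calls
+ * efi_mem_reserve(esrt_data, esrt_data_size) so that it survives
+ * efi_free_boot_services().
+ */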
+
+static const efi_config_table_type_t common_tables[] __initconst = {
+ {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
+ {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
+ {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
+ {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
+ {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
+ {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_EFI_RCI2_TABLE
+ {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
+#endif
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
+#endif
+#ifdef CONFIG_EFI_COCO_SECRET
+ {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
+#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ {LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
+#endif
+#ifdef CONFIG_EFI_GENERIC_STUB
+ {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
+#endif
+ {},
+};
+
+static __init int match_config_table(const efi_guid_t *guid,
+ unsigned long table,
+ const efi_config_table_type_t *table_types)
+{
+ int i;
+
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ if (efi_guidcmp(*guid, table_types[i].guid))
+ continue;
+
+ if (!efi_config_table_is_usable(guid, table)) {
+ if (table_types[i].name[0])
+ pr_cont("(%s=0x%lx unusable) ",
+ table_types[i].name, table);
+ return 1;
+ }
+
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ", table_types[i].name, table);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * reserve_unaccepted - Map and reserve unaccepted configuration table
+ * @unaccepted: Pointer to unaccepted memory table
+ *
+ * memblock_add() makes sure that the table is mapped in direct mapping. During
+ * normal boot it happens automatically because the table is allocated from
+ * usable memory. But during a crashkernel boot, only memory specifically
+ * reserved for the crash scenario is mapped. memblock_add() forces the table
+ * to be mapped in the crashkernel case.
+ *
+ * Align the range to the nearest page borders. Ranges smaller than the page
+ * size are not going to be mapped.
+ *
+ * memblock_reserve() makes sure that future allocations will not touch the
+ * table.
+ */
+
+static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
+{
+ phys_addr_t start, size;
+
+ start = PAGE_ALIGN_DOWN(efi.unaccepted);
+ size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
+
+ memblock_add(start, size);
+ memblock_reserve(start, size);
+}
+
+int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables)
+{
+ const efi_config_table_64_t *tbl64 = (void *)config_tables;
+ const efi_config_table_32_t *tbl32 = (void *)config_tables;
+ const efi_guid_t *guid;
+ unsigned long table;
+ int i;
+
+ pr_info("");
+ for (i = 0; i < count; i++) {
+ if (!IS_ENABLED(CONFIG_X86)) {
+ guid = &config_tables[i].guid;
+ table = (unsigned long)config_tables[i].table;
+ } else if (efi_enabled(EFI_64BIT)) {
+ guid = &tbl64[i].guid;
+ table = tbl64[i].table;
+
+ if (IS_ENABLED(CONFIG_X86_32) &&
+ tbl64[i].table > U32_MAX) {
+ pr_cont("\n");
+ pr_err("Table located above 4GB, disabling EFI.\n");
+ return -EINVAL;
+ }
+ } else {
+ guid = &tbl32[i].guid;
+ table = tbl32[i].table;
+ }
+
+ if (!match_config_table(guid, table, common_tables) && arch_tables)
+ match_config_table(guid, table, arch_tables);
+ }
+ pr_cont("\n");
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
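+ /*
+ * If the stub passed a random seed via the LINUX_EFI_RANDOM_SEED
+ * table, feed it to the RNG as bootloader randomness and wipe the
+ * in-memory copy afterwards.
+ */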
+ if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ seed = early_memremap(efi_rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+ size = min_t(u32, seed->size, SZ_1K); // sanity check
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = early_memremap(efi_rng_seed,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ add_bootloader_randomness(seed->bits, size);
+ memzero_explicit(seed->bits, size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ }
+
+ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
+ efi_memattr_init();
+
+ efi_tpm_eventlog_init();
+
+ if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = mem_reserve;
+
+ while (prsv) {
+ struct linux_efi_memreserve *rsv;
+ u8 *p;
+
+ /*
+ * Just map a full page: that is what we will get
+ * anyway, and it permits us to map the entire entry
+ * before knowing its size.
+ */
+ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+ PAGE_SIZE);
+ if (p == NULL) {
+ pr_err("Could not map UEFI memreserve entry!\n");
+ return -ENOMEM;
+ }
+
+ rsv = (void *)(p + prsv % PAGE_SIZE);
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv,
+ struct_size(rsv, entry, rsv->size));
+
+ for (i = 0; i < atomic_read(&rsv->count); i++) {
+ memblock_reserve(rsv->entry[i].base,
+ rsv->entry[i].size);
+ }
+
+ prsv = rsv->next;
+ early_memunmap(p, PAGE_SIZE);
+ }
+ }
+
+ if (rt_prop != EFI_INVALID_TABLE_ADDR) {
+ efi_rt_properties_table_t *tbl;
+
+ tbl = early_memremap(rt_prop, sizeof(*tbl));
+ if (tbl) {
+ efi.runtime_supported_mask &= tbl->runtime_services_supported;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+ initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
+ struct linux_efi_initrd *tbl;
+
+ tbl = early_memremap(initrd, sizeof(*tbl));
+ if (tbl) {
+ phys_initrd_start = tbl->base;
+ phys_initrd_size = tbl->size;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
+ efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
+ struct efi_unaccepted_memory *unaccepted;
+
+ unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
+ if (unaccepted) {
+
+ if (unaccepted->version == 1) {
+ reserve_unaccepted(unaccepted);
+ } else {
+ efi.unaccepted = EFI_INVALID_TABLE_ADDR;
+ }
+
+ early_memunmap(unaccepted, sizeof(*unaccepted));
+ }
+ }
+
+ return 0;
+}
+
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
+{
+ if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifndef CONFIG_IA64
+static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
+ size_t size)
+{
+ const efi_char16_t *ret;
+
+ ret = early_memremap_ro(fw_vendor, size);
+ if (!ret)
+ pr_err("Could not map the firmware vendor!\n");
+ return ret;
+}
+
+static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
+{
+ early_memunmap((void *)fw_vendor, size);
+}
+#else
+#define map_fw_vendor(p, s) __va(p)
+#define unmap_fw_vendor(v, s)
+#endif
+
+void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor)
+{
+ char vendor[100] = "unknown";
+ const efi_char16_t *c16;
+ size_t i;
+ u16 rev;
+
+ c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
+ if (c16) {
+ for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+
+ unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ }
+
+ rev = (u16)systab_hdr->revision;
+ pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
+
+ rev %= 10;
+ if (rev)
+ pr_cont(".%u", rev);
+
+ pr_cont(" by %s\n", vendor);
+
+ if (IS_ENABLED(CONFIG_X86_64) &&
+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
+ !strcmp(vendor, "Apple")) {
+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
+ }
+}
+
+static __initdata char memory_type_name[][13] = {
+ "Reserved",
+ "Loader Code",
+ "Loader Data",
+ "Boot Code",
+ "Boot Data",
+ "Runtime Code",
+ "Runtime Data",
+ "Conventional",
+ "Unusable",
+ "ACPI Reclaim",
+ "ACPI Mem NVS",
+ "MMIO",
+ "MMIO Port",
+ "PAL Code",
+ "Persistent",
+ "Unaccepted",
+};
+
+char * __init efi_md_typeattr_format(char *buf, size_t size,
+ const efi_memory_desc_t *md)
+{
+ char *pos;
+ int type_len;
+ u64 attr;
+
+ pos = buf;
+ if (md->type >= ARRAY_SIZE(memory_type_name))
+ type_len = snprintf(pos, size, "[type=%u", md->type);
+ else
+ type_len = snprintf(pos, size, "[%-*s",
+ (int)(sizeof(memory_type_name[0]) - 1),
+ memory_type_name[md->type]);
+ if (type_len >= size)
+ return buf;
+
+ pos += type_len;
+ size -= type_len;
+
+ attr = md->attribute;
+ if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
+ EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+ EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
+ EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ snprintf(pos, size, "|attr=0x%016llx]",
+ (unsigned long long)attr);
+ else
+ snprintf(pos, size,
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
+ attr & EFI_MEMORY_NV ? "NV" : "",
+ attr & EFI_MEMORY_XP ? "XP" : "",
+ attr & EFI_MEMORY_RP ? "RP" : "",
+ attr & EFI_MEMORY_WP ? "WP" : "",
+ attr & EFI_MEMORY_RO ? "RO" : "",
+ attr & EFI_MEMORY_UCE ? "UCE" : "",
+ attr & EFI_MEMORY_WB ? "WB" : "",
+ attr & EFI_MEMORY_WT ? "WT" : "",
+ attr & EFI_MEMORY_WC ? "WC" : "",
+ attr & EFI_MEMORY_UC ? "UC" : "");
+ return buf;
+}
+
+/*
+ * IA64 has a funky EFI memory map that doesn't work the same way as
+ * other architectures.
+ */
+#ifndef CONFIG_IA64
+/*
+ * efi_mem_attributes - lookup memmap attributes for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering
+ * @phys_addr. Returns the EFI memory attributes if the region
+ * was found in the memory map, 0 otherwise.
+ */
+u64 efi_mem_attributes(unsigned long phys_addr)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->attribute;
+ }
+ return 0;
+}
+
+/*
+ * efi_mem_type - lookup memmap type for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering @phys_addr.
+ * Returns the EFI memory type if the region was found in the memory
+ * map, -EINVAL otherwise.
+ */
+int efi_mem_type(unsigned long phys_addr)
+{
+ const efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return -ENOTSUPP;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->type;
+ }
+ return -EINVAL;
+}
+#endif
+
+int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_SUCCESS:
+ err = 0;
+ break;
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ case EFI_ABORTED:
+ err = -EINTR;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(efi_status_to_err);
+
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
+
+static int __init efi_memreserve_map_root(void)
+{
+ if (mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (WARN_ON_ONCE(!efi_memreserve_root))
+ return -ENOMEM;
+ return 0;
+}
+
+static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
+{
+ struct resource *res, *parent;
+ int ret;
+
+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = addr;
+ res->end = addr + size - 1;
+
+ /* we expect a conflict with a 'System RAM' region */
+ parent = request_resource_conflict(&iomem_resource, res);
+ ret = parent ? request_resource(parent, res) : 0;
+
+ /*
+ * Given that efi_mem_reserve_iomem() can be called at any
+ * time, only call memblock_reserve() if the architecture
+ * keeps the infrastructure around.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+ memblock_reserve(addr, size);
+
+ return ret;
+}
+
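+/*
+ * Record a reservation in the LINUX_EFI_MEMRESERVE linked list so that it is
+ * honoured across kexec: try to grab a free slot in an existing list entry
+ * first, and only allocate and link a new page-sized entry if none is found.
+ */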
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+ struct linux_efi_memreserve *rsv;
+ unsigned long prsv;
+ int rc, index;
+
+ if (efi_memreserve_root == (void *)ULONG_MAX)
+ return -ENODEV;
+
+ if (!efi_memreserve_root) {
+ rc = efi_memreserve_map_root();
+ if (rc)
+ return rc;
+ }
+
+ /* first try to find a slot in an existing linked list entry */
+ for (prsv = efi_memreserve_root->next; prsv; ) {
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
+ index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ if (index < rsv->size) {
+ rsv->entry[index].base = addr;
+ rsv->entry[index].size = size;
+
+ memunmap(rsv);
+ return efi_mem_reserve_iomem(addr, size);
+ }
+ prsv = rsv->next;
+ memunmap(rsv);
+ }
+
+ /* no slot found - allocate a new linked list entry */
+ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+ if (!rsv)
+ return -ENOMEM;
+
+ rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
+ if (rc) {
+ free_page((unsigned long)rsv);
+ return rc;
+ }
+
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
+ atomic_set(&rsv->count, 1);
+ rsv->entry[0].base = addr;
+ rsv->entry[0].size = size;
+
+ spin_lock(&efi_mem_reserve_persistent_lock);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+ return efi_mem_reserve_iomem(addr, size);
+}
+
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi_memreserve_root)
+ return 0;
+ if (efi_memreserve_map_root())
+ efi_memreserve_root = (void *)ULONG_MAX;
+ return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
+#ifdef CONFIG_KEXEC
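+/*
+ * On the reboot path into a kexec kernel, refresh the seed in the RNG seed
+ * table with fresh entropy so that the next kernel does not boot with the
+ * seed this kernel has already consumed.
+ */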
+static int update_efi_random_seed(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ if (!kexec_in_progress)
+ return NOTIFY_DONE;
+
+ seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
+ if (seed != NULL) {
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = memremap(efi_rng_seed, sizeof(*seed) + size,
+ MEMREMAP_WB);
+ if (seed != NULL) {
+ seed->size = size;
+ get_random_bytes(seed->bits, seed->size);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+ .notifier_call = update_efi_random_seed,
+};
+
+static int __init register_update_efi_random_seed(void)
+{
+ if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 0000000000..4f9fb086ea
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+#define MAX_DATA_LEN 512
+
+static int efibc_set_variable(efi_char16_t *name, efi_char16_t *value,
+ unsigned long len)
+{
+ efi_status_t status;
+
+ status = efi.set_variable(name, &LINUX_EFI_LOADER_ENTRY_GUID,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ len * sizeof(efi_char16_t), value);
+
+ if (status != EFI_SUCCESS) {
+ pr_err("failed to set EFI variable: 0x%lx\n", status);
+ return -EIO;
+ }
+ return 0;
+}
+
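+/*
+ * On shutdown or reboot, record the reason in LoaderEntryRebootReason and, if
+ * a reboot argument was supplied, pass it on to the bootloader via the
+ * LoaderEntryOneShot variable.
+ */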
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ efi_char16_t *reason = event == SYS_RESTART ? L"reboot"
+ : L"shutdown";
+ const u8 *str = data;
+ efi_char16_t *wdata;
+ unsigned long l;
+ int ret;
+
+ ret = efibc_set_variable(L"LoaderEntryRebootReason", reason,
+ ucs2_strlen(reason));
+ if (ret || !data)
+ return NOTIFY_DONE;
+
+ wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
+ for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
+ wdata[l] = str[l];
+ wdata[l] = L'\0';
+
+ efibc_set_variable(L"LoaderEntryOneShot", wdata, l);
+
+ kfree(wdata);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+ .notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+ int ret;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE))
+ return -ENODEV;
+
+ ret = register_reboot_notifier(&efibc_reboot_notifier);
+ if (ret)
+ pr_err("unable to register reboot notifier\n");
+
+ return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+ unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
new file mode 100644
index 0000000000..f5be8e2230
--- /dev/null
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for extracting embedded firmware for peripherals from EFI code,
+ *
+ * Copyright (c) 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/efi_embedded_fw.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <crypto/sha2.h>
+
+/* Exported for use by lib/test_firmware.c only */
+LIST_HEAD(efi_embedded_fw_list);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+bool efi_embedded_fw_checked;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
+
+static const struct dmi_system_id * const embedded_fw_table[] = {
+#ifdef CONFIG_TOUCHSCREEN_DMI
+ touchscreen_dmi_table,
+#endif
+ NULL
+};
+
+/*
+ * Note the efi_check_for_embedded_firmwares() code currently makes the
+ * following 2 assumptions. This may need to be revisited if embedded firmware
+ * is found where this is not true:
+ * 1) The firmware is only found in EFI_BOOT_SERVICES_CODE memory segments
+ * 2) The firmware always starts at an offset which is a multiple of 8 bytes
+ */
+static int __init efi_check_md_for_embedded_firmware(
+ efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
+{
+ struct efi_embedded_fw *fw;
+ u8 hash[32];
+ u64 i, size;
+ u8 *map;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ map = memremap(md->phys_addr, size, MEMREMAP_WB);
+ if (!map) {
+ pr_err("Error mapping EFI mem at %#llx\n", md->phys_addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; (i + desc->length) <= size; i += 8) {
+ if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
+ continue;
+
+ sha256(map + i, desc->length, hash);
+ if (memcmp(hash, desc->sha256, 32) == 0)
+ break;
+ }
+ if ((i + desc->length) > size) {
+ memunmap(map);
+ return -ENOENT;
+ }
+
+ pr_info("Found EFI embedded fw '%s'\n", desc->name);
+
+ fw = kmalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ memunmap(map);
+ return -ENOMEM;
+ }
+
+ fw->data = kmemdup(map + i, desc->length, GFP_KERNEL);
+ memunmap(map);
+ if (!fw->data) {
+ kfree(fw);
+ return -ENOMEM;
+ }
+
+ fw->name = desc->name;
+ fw->length = desc->length;
+ list_add(&fw->list, &efi_embedded_fw_list);
+
+ return 0;
+}
+
+void __init efi_check_for_embedded_firmwares(void)
+{
+ const struct efi_embedded_fw_desc *fw_desc;
+ const struct dmi_system_id *dmi_id;
+ efi_memory_desc_t *md;
+ int i, r;
+
+ for (i = 0; embedded_fw_table[i]; i++) {
+ dmi_id = dmi_first_match(embedded_fw_table[i]);
+ if (!dmi_id)
+ continue;
+
+ fw_desc = dmi_id->driver_data;
+
+ /*
+ * In some drivers the driver_data struct may contain other
+ * driver-specific data after the fw_desc struct, and the
+ * fw_desc struct itself may be empty; skip these.
+ */
+ if (!fw_desc->name)
+ continue;
+
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_BOOT_SERVICES_CODE)
+ continue;
+
+ r = efi_check_md_for_embedded_firmware(md, fw_desc);
+ if (r == 0)
+ break;
+ }
+ }
+
+ efi_embedded_fw_checked = true;
+}
+
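+/*
+ * Look up a previously extracted firmware image by name; typically reached
+ * through the firmware loader's platform fallback path.
+ */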
+int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
+{
+ struct efi_embedded_fw *iter, *fw = NULL;
+
+ if (!efi_embedded_fw_checked) {
+ pr_warn("Warning %s called while we did not check for embedded fw\n",
+ __func__);
+ return -ENOENT;
+ }
+
+ list_for_each_entry(iter, &efi_embedded_fw_list, list) {
+ if (strcmp(name, iter->name) == 0) {
+ fw = iter;
+ break;
+ }
+ }
+
+ if (!fw)
+ return -ENOENT;
+
+ *data = fw->data;
+ *size = fw->length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_get_embedded_fw);
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
new file mode 100644
index 0000000000..7a81c0ce47
--- /dev/null
+++ b/drivers/firmware/efi/esrt.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * esrt.c
+ *
+ * This module exports EFI System Resource Table (ESRT) entries into userspace
+ * through the sysfs file system. The ESRT provides a read-only catalog of
+ * system components for which the system accepts firmware upgrades via UEFI's
+ * "Capsule Update" feature. This module allows userland utilities to evaluate
+ * what firmware updates can be applied to this system, and potentially arrange
+ * for those updates to occur.
+ *
+ * Data is currently found below /sys/firmware/efi/esrt/...
+ */
+#define pr_fmt(fmt) "esrt: " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/early_ioremap.h>
+
+struct efi_system_resource_entry_v1 {
+ efi_guid_t fw_class;
+ u32 fw_type;
+ u32 fw_version;
+ u32 lowest_supported_fw_version;
+ u32 capsule_flags;
+ u32 last_attempt_version;
+ u32 last_attempt_status;
+};
+
+/*
+ * _count and _version are what they seem like. _max is actually just
+ * accounting info for the firmware when creating the table; it should never
+ * have been exposed to us. To wit, the spec says:
+ * The maximum number of resource array entries that can be within the
+ * table without reallocating the table, must not be zero.
+ * Since there's no guidance about what that means in terms of memory layout,
+ * it means nothing to us.
+ */
+struct efi_system_resource_table {
+ u32 fw_resource_count;
+ u32 fw_resource_count_max;
+ u64 fw_resource_version;
+ u8 entries[];
+};
+
+static phys_addr_t esrt_data;
+static size_t esrt_data_size;
+
+static struct efi_system_resource_table *esrt;
+
+struct esre_entry {
+ union {
+ struct efi_system_resource_entry_v1 *esre1;
+ } esre;
+
+ struct kobject kobj;
+ struct list_head list;
+};
+
+/* global list of esre_entry. */
+static LIST_HEAD(entry_list);
+
+/* entry attribute */
+struct esre_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct esre_entry *entry, char *buf);
+ ssize_t (*store)(struct esre_entry *entry,
+ const char *buf, size_t count);
+};
+
+static struct esre_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct esre_entry, kobj);
+}
+
+static struct esre_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct esre_attribute, attr);
+}
+
+static ssize_t esre_attr_show(struct kobject *kobj,
+ struct attribute *_attr, char *buf)
+{
+ struct esre_entry *entry = to_entry(kobj);
+ struct esre_attribute *attr = to_attr(_attr);
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops esre_attr_ops = {
+ .show = esre_attr_show,
+};
+
+/* Generic ESRT Entry ("ESRE") support. */
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
+{
+ char *str = buf;
+
+ efi_guid_to_str(&entry->esre.esre1->fw_class, str);
+ str += strlen(str);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
+
+#define esre_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", \
+ le##size##_to_cpu(entry->esre.esre1->name)); \
+} \
+\
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
+
+esre_attr_decl(fw_type, 32, "%u");
+esre_attr_decl(fw_version, 32, "%u");
+esre_attr_decl(lowest_supported_fw_version, 32, "%u");
+esre_attr_decl(capsule_flags, 32, "0x%x");
+esre_attr_decl(last_attempt_version, 32, "%u");
+esre_attr_decl(last_attempt_status, 32, "%u");
+
+static struct attribute *esre1_attrs[] = {
+ &esre_fw_class.attr,
+ &esre_fw_type.attr,
+ &esre_fw_version.attr,
+ &esre_lowest_supported_fw_version.attr,
+ &esre_capsule_flags.attr,
+ &esre_last_attempt_version.attr,
+ &esre_last_attempt_status.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(esre1);
+
+static void esre_release(struct kobject *kobj)
+{
+ struct esre_entry *entry = to_entry(kobj);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static const struct kobj_type esre1_ktype = {
+ .release = esre_release,
+ .sysfs_ops = &esre_attr_ops,
+ .default_groups = esre1_groups,
+};
+
+
+static struct kobject *esrt_kobj;
+static struct kset *esrt_kset;
+
+static int esre_create_sysfs_entry(void *esre, int entry_num)
+{
+ struct esre_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->kobj.kset = esrt_kset;
+
+ if (esrt->fw_resource_version == 1) {
+ int rc = 0;
+
+ entry->esre.esre1 = esre;
+ rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
+ "entry%d", entry_num);
+ if (rc) {
+ kobject_put(&entry->kobj);
+ return rc;
+ }
+ }
+
+ list_add_tail(&entry->list, &entry_list);
+ return 0;
+}
+
+/* support for displaying ESRT fields at the top level */
+#define esrt_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+{ \
+ return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
+} \
+\
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
+
+esrt_attr_decl(fw_resource_count, 32, "%u");
+esrt_attr_decl(fw_resource_count_max, 32, "%u");
+esrt_attr_decl(fw_resource_version, 64, "%llu");
+
+static struct attribute *esrt_attrs[] = {
+ &esrt_fw_resource_count.attr,
+ &esrt_fw_resource_count_max.attr,
+ &esrt_fw_resource_version.attr,
+ NULL,
+};
+
+static inline int esrt_table_exists(void)
+{
+ if (!efi_enabled(EFI_CONFIG_TABLES))
+ return 0;
+ if (efi.esrt == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return 1;
+}
+
+static umode_t esrt_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (!esrt_table_exists())
+ return 0;
+ return attr->mode;
+}
+
+static const struct attribute_group esrt_attr_group = {
+ .attrs = esrt_attrs,
+ .is_visible = esrt_attr_is_visible,
+};
+
+/*
+ * remap the table, validate it, mark it reserved and unmap it.
+ */
+void __init efi_esrt_init(void)
+{
+ void *va;
+ struct efi_system_resource_table tmpesrt;
+ size_t size, max, entry_size, entries_size;
+ efi_memory_desc_t md;
+ int rc;
+ phys_addr_t end;
+
+ if (!efi_enabled(EFI_MEMMAP) && !efi_enabled(EFI_PARAVIRT))
+ return;
+
+ pr_debug("esrt-init: loading.\n");
+ if (!esrt_table_exists())
+ return;
+
+ rc = efi_mem_desc_lookup(efi.esrt, &md);
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA &&
+ md.type != EFI_ACPI_RECLAIM_MEMORY &&
+ md.type != EFI_ACPI_MEMORY_NVS)) {
+ pr_warn("ESRT header is not in the memory map.\n");
+ return;
+ }
+
+ max = efi_mem_desc_end(&md) - efi.esrt;
+ size = sizeof(*esrt);
+
+ if (max < size) {
+ pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
+ size, max);
+ return;
+ }
+
+ va = early_memremap(efi.esrt, size);
+ if (!va) {
+ pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt,
+ size);
+ return;
+ }
+
+ memcpy(&tmpesrt, va, sizeof(tmpesrt));
+ early_memunmap(va, size);
+
+ if (tmpesrt.fw_resource_version != 1) {
+ pr_err("Unsupported ESRT version %lld.\n",
+ tmpesrt.fw_resource_version);
+ return;
+ }
+
+ entry_size = sizeof(struct efi_system_resource_entry_v1);
+ if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) {
+ pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n",
+ max - size, entry_size);
+ return;
+ }
+
+ /*
+ * The format doesn't really give us any boundary to test here,
+ * so I'm making up 128 as the max number of individually updatable
+ * components we support.
+ * 128 should be pretty excessive, but there's still some chance
+ * somebody will do that someday and we'll need to raise this.
+ */
+ if (tmpesrt.fw_resource_count > 128) {
+ pr_err("ESRT says fw_resource_count has very large value %d.\n",
+ tmpesrt.fw_resource_count);
+ return;
+ }
+
+ /*
+ * We know it can't be larger than N * sizeof() here, and N is limited
+ * by the previous test to a small number, so there's no overflow.
+ */
+ entries_size = tmpesrt.fw_resource_count * entry_size;
+ if (max < size + entries_size) {
+ pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n",
+ size, max);
+ return;
+ }
+
+ size += entries_size;
+
+ esrt_data = (phys_addr_t)efi.esrt;
+ esrt_data_size = size;
+
+ end = esrt_data + size;
+ pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
+
+ pr_debug("esrt-init: loaded.\n");
+}
+
+static int __init register_entries(void)
+{
+ struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries;
+ int i, rc;
+
+ if (!esrt_table_exists())
+ return 0;
+
+ for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) {
+ void *esre = NULL;
+ if (esrt->fw_resource_version == 1) {
+ esre = &v1_entries[i];
+ } else {
+ pr_err("Unsupported ESRT version %lld.\n",
+ esrt->fw_resource_version);
+ return -EINVAL;
+ }
+
+ rc = esre_create_sysfs_entry(esre, i);
+ if (rc < 0) {
+ pr_err("ESRT entry creation failed with error %d.\n",
+ rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void cleanup_entry_list(void)
+{
+ struct esre_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &entry_list, list) {
+ kobject_put(&entry->kobj);
+ }
+}
+
+static int __init esrt_sysfs_init(void)
+{
+ int error;
+
+ pr_debug("esrt-sysfs: loading.\n");
+ if (!esrt_data || !esrt_data_size)
+ return -ENOSYS;
+
+ esrt = memremap(esrt_data, esrt_data_size, MEMREMAP_WB);
+ if (!esrt) {
+ pr_err("memremap(%pa, %zu) failed.\n", &esrt_data,
+ esrt_data_size);
+ return -ENOMEM;
+ }
+
+ esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
+ if (!esrt_kobj) {
+ pr_err("Firmware table registration failed.\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ error = sysfs_create_group(esrt_kobj, &esrt_attr_group);
+ if (error) {
+ pr_err("Sysfs attribute export failed with error %d.\n",
+ error);
+ goto err_remove_esrt;
+ }
+
+ esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj);
+ if (!esrt_kset) {
+ pr_err("kset creation failed.\n");
+ error = -ENOMEM;
+ goto err_remove_group;
+ }
+
+ error = register_entries();
+ if (error)
+ goto err_cleanup_list;
+
+ pr_debug("esrt-sysfs: loaded.\n");
+
+ return 0;
+err_cleanup_list:
+ cleanup_entry_list();
+ kset_unregister(esrt_kset);
+err_remove_group:
+ sysfs_remove_group(esrt_kobj, &esrt_attr_group);
+err_remove_esrt:
+ kobject_put(esrt_kobj);
+err:
+ memunmap(esrt);
+ esrt = NULL;
+ return error;
+}
+device_initcall(esrt_sysfs_init);
+
+/*
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
+MODULE_DESCRIPTION("EFI System Resource Table support");
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
new file mode 100644
index 0000000000..0ec83ba580
--- /dev/null
+++ b/drivers/firmware/efi/fdtparams.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+
+#include <asm/unaligned.h>
+
+enum {
+ SYSTAB,
+ MMBASE,
+ MMSIZE,
+ DCSIZE,
+ DCVERS,
+
+ PARAMCOUNT
+};
+
+static __initconst const char name[][22] = {
+ [SYSTAB] = "System Table ",
+ [MMBASE] = "MemMap Address ",
+ [MMSIZE] = "MemMap Size ",
+ [DCSIZE] = "MemMap Desc. Size ",
+ [DCVERS] = "MemMap Desc. Version ",
+};
+
+static __initconst const struct {
+ const char path[17];
+ u8 paravirt;
+ const char params[PARAMCOUNT][26];
+} dt_params[] = {
+ {
+#ifdef CONFIG_XEN // <-------17------>
+ .path = "/hypervisor/uefi",
+ .paravirt = 1,
+ .params = {
+ [SYSTAB] = "xen,uefi-system-table",
+ [MMBASE] = "xen,uefi-mmap-start",
+ [MMSIZE] = "xen,uefi-mmap-size",
+ [DCSIZE] = "xen,uefi-mmap-desc-size",
+ [DCVERS] = "xen,uefi-mmap-desc-ver",
+ }
+ }, {
+#endif
+ .path = "/chosen",
+ .params = { // <-----------26----------->
+ [SYSTAB] = "linux,uefi-system-table",
+ [MMBASE] = "linux,uefi-mmap-start",
+ [MMSIZE] = "linux,uefi-mmap-size",
+ [DCSIZE] = "linux,uefi-mmap-desc-size",
+ [DCVERS] = "linux,uefi-mmap-desc-ver",
+ }
+ }
+};
+
+static int __init efi_get_fdt_prop(const void *fdt, int node, const char *pname,
+ const char *rname, void *var, int size)
+{
+ const void *prop;
+ int len;
+ u64 val;
+
+ prop = fdt_getprop(fdt, node, pname, &len);
+ if (!prop)
+ return 1;
+
+ val = (len == 4) ? (u64)be32_to_cpup(prop) : get_unaligned_be64(prop);
+
+ if (size == 8)
+ *(u64 *)var = val;
+ else
+ *(u32 *)var = (val < U32_MAX) ? val : U32_MAX; // saturate
+
+ if (efi_enabled(EFI_DBG))
+ pr_info(" %s: 0x%0*llx\n", rname, size * 2, val);
+
+ return 0;
+}
+
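+/*
+ * Walk the candidate DT nodes (the Xen /hypervisor/uefi node first, then
+ * /chosen), fill in the UEFI memory map parameters and return the system
+ * table address, or 0 if none was found. A missing system-table property
+ * just means UEFI is not present; a missing memory-map property is an error.
+ */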
+u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+{
+ const void *fdt = initial_boot_params;
+ unsigned long systab;
+ int i, j, node;
+ struct {
+ void *var;
+ int size;
+ } target[] = {
+ [SYSTAB] = { &systab, sizeof(systab) },
+ [MMBASE] = { &mm->phys_map, sizeof(mm->phys_map) },
+ [MMSIZE] = { &mm->size, sizeof(mm->size) },
+ [DCSIZE] = { &mm->desc_size, sizeof(mm->desc_size) },
+ [DCVERS] = { &mm->desc_version, sizeof(mm->desc_version) },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+
+ if (!fdt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ node = fdt_path_offset(fdt, dt_params[i].path);
+ if (node < 0)
+ continue;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Getting UEFI parameters from %s in DT:\n",
+ dt_params[i].path);
+
+ for (j = 0; j < ARRAY_SIZE(target); j++) {
+ const char *pname = dt_params[i].params[j];
+
+ if (!efi_get_fdt_prop(fdt, node, pname, name[j],
+ target[j].var, target[j].size))
+ continue;
+ if (!j)
+ goto notfound;
+ pr_err("Can't find property '%s' in DT!\n", pname);
+ return 0;
+ }
+ if (dt_params[i].paravirt)
+ set_bit(EFI_PARAVIRT, &efi.flags);
+ return systab;
+ }
+notfound:
+ pr_info("UEFI not found.\n");
+ return 0;
+}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
new file mode 100644
index 0000000000..a1157c2a71
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -0,0 +1,177 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# The stub may be linked into the kernel proper or into a separate boot binary,
+# but in either case, it executes before the kernel does (with MMU disabled) so
+# things like ftrace and stack-protector are likely to cause trouble if left
+# enabled, even if doing so doesn't break the build.
+#
+
+# non-x86 reuses KBUILD_CFLAGS, x86 does not
+cflags-y := $(KBUILD_CFLAGS)
+
+cflags-$(CONFIG_X86_32) := -march=i386
+cflags-$(CONFIG_X86_64) := -mcmodel=small
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+ -fPIC -fno-strict-aliasing -mno-red-zone \
+ -mno-mmx -mno-sse -fshort-wchar \
+ -Wno-pointer-sign \
+ $(call cc-disable-warning, address-of-packed-member) \
+ $(call cc-disable-warning, gnu) \
+ -fno-asynchronous-unwind-tables \
+ $(CLANG_FLAGS)
+
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+ -fno-unwind-tables -fno-asynchronous-unwind-tables
+cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
+ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
+ -DEFI_HAVE_STRCMP -fno-builtin -fpic \
+ $(call cc-option,-mno-single-pic-base)
+cflags-$(CONFIG_RISCV) += -fpic
+cflags-$(CONFIG_LOONGARCH) += -fpie
+
+cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
+
+KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(cflags-y)) \
+ -Os -DDISABLE_BRANCH_PROFILING \
+ -include $(srctree)/include/linux/hidden.h \
+ -D__NO_FORTIFY \
+ -ffreestanding \
+ -fno-stack-protector \
+ $(call cc-option,-fno-addrsig) \
+ -D__DISABLE_EXPORTS
+
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
+# remove SCS flags from all objects in this directory
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable CFI
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# disable LTO
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
+
+GCOV_PROFILE := n
+# Sanitizer runtimes are unavailable and cannot be linked here.
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
+ file.o mem.o random.o randomalloc.o pci.o \
+ skip_spaces.o lib-cmdline.o lib-ctype.o \
+ alignedmem.o relocate.o printk.o vsprintf.o
+
+# include the stub's libfdt dependencies from lib/ when needed
+libfdt-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c \
+ fdt_empty_tree.c fdt_sw.c
+
+lib-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdt.o \
+ $(patsubst %.c,lib-%.o,$(libfdt-deps))
+
+$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
+ screen_info.o efi-stub-entry.o
+
+lib-$(CONFIG_ARM) += arm32-stub.o
+lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
+lib-$(CONFIG_X86) += x86-stub.o
+lib-$(CONFIG_X86_64) += x86-5lvl.o
+lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
+lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
+
+CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
+
+lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
+
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
+
+# Even when -mbranch-protection=none is set, Clang will generate a
+# .note.gnu.property for code-less object files (like lib/ctype.c),
+# so work around this by explicitly removing the unwanted section.
+# https://bugs.llvm.org/show_bug.cgi?id=46480
+STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
+
+#
+# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+# .bss section, so the .bss section of the EFI stub needs to be included in the
+# .data section of the compressed kernel to ensure initialization. Rename the
+# .bss section here so it's easy to pick out in the linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
+STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub and .bss to .bss.efistub,
+# which are preserved explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+
+#
+# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
+# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
+# So let's apply the __init annotations at the section level, by prefixing
+# the section names directly. This ensures that all the inline string
+# literals are covered as well.
+# The fact that the stub and the kernel proper are essentially the same binary
+# also means that we need to be extra careful to make sure that the stub does
+# not rely on any absolute symbol references, considering that the virtual
+# kernel mapping that the linker uses is not active yet when the stub is
+# executing. So build all C dependencies of the EFI stub into libstub, and do
+# a verification pass to see if any absolute relocations exist in any of the
+# object files.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
+
+# For RISC-V, nothing special is needed beyond what arm64 does. Keep all the
+# symbols in the .init section and make sure that no absolute symbol
+# references exist.
+STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+
+# For LoongArch, keep all the symbols in the .init section and make sure that
+# no absolute symbol references exist.
+STUBCOPY_FLAGS-$(CONFIG_LOONGARCH) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_LOONGARCH) := R_LARCH_MARK_LA
+
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+ $(call if_changed,stubcopy)
+
+#
+# Strip debug sections and some other sections that may legally contain
+# absolute relocations, so that we can inspect the remaining sections for
+# such relocations. If none are found, regenerate the output object, but
+# this time, use objcopy and leave all sections in place.
+#
+quiet_cmd_stubcopy = STUBCPY $@
+ cmd_stubcopy = \
+ $(STRIP) --strip-debug -o $@ $<; \
+ if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \
+ echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+ /bin/false; \
+ fi; \
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
new file mode 100644
index 0000000000..2c489627a8
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# to be include'd by arch/$(ARCH)/boot/Makefile after setting
+# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET, EFI_ZBOOT_MACH_TYPE and
+# EFI_ZBOOT_FORWARD_CFI
+
+quiet_cmd_copy_and_pad = PAD $@
+ cmd_copy_and_pad = cp $< $@ && \
+ truncate -s $(shell hexdump -s16 -n4 -e '"%u"' $<) $@
+
+# Pad the file to the size of the uncompressed image in memory, including BSS
+$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
+ $(call if_changed,copy_and_pad)
+
+comp-type-$(CONFIG_KERNEL_GZIP) := gzip
+comp-type-$(CONFIG_KERNEL_LZ4) := lz4
+comp-type-$(CONFIG_KERNEL_LZMA) := lzma
+comp-type-$(CONFIG_KERNEL_LZO) := lzo
+comp-type-$(CONFIG_KERNEL_XZ) := xzkern
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
+
+# In GZIP, the appended le32 carrying the uncompressed size is part of the
+# format, but in other cases, we just append it at the end for convenience,
+# causing the original tools to complain when checking image integrity.
+# So disregard it when calculating the payload size in the zimage header.
+zboot-method-y := $(comp-type-y)_with_size
+zboot-size-len-y := 4
+
+zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
+zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+
+$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,$(zboot-method-y))
+
+# avoid eager evaluation to prevent references to non-existent build artifacts
+OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
+ --rename-section .data=.gzdata,load,alloc,readonly,contents
+$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
+ $(call if_changed,objcopy)
+
+aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
+ -DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT
+
+AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
+ -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
+ -DCOMP_TYPE="\"$(comp-type-y)\"" \
+ $(aflags-zboot-header-y)
+
+$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
+$(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE
+ $(call if_changed,ld)
+
+OBJCOPYFLAGS_vmlinuz.efi := -O binary
+$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
+ $(call if_changed,objcopy)
+
+targets += zboot-header.o vmlinux.bin vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
new file mode 100644
index 0000000000..6b83c492c3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_allocate_pages_aligned() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return, the address of the allocation, which is aligned to
+ * @align (at least EFI_ALLOC_ALIGN, an architecture dependent
+ * multiple of the page size)
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ * @align: minimum alignment of the base of the allocation
+ * @memory_type: the EFI memory type of the allocation (e.g. EFI_LOADER_DATA)
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
+ * not exceed the address given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align,
+ int memory_type)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+ int slack;
+
+ max = min(max, EFI_ALLOC_LIMIT);
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ alloc_addr = ALIGN_DOWN(max + 1, align) - 1;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ slack = align / EFI_PAGE_SIZE - 1;
+
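+ /*
+ * Overallocate by 'slack' pages so that a base aligned to @align can
+ * always be found inside the allocation; e.g. with 64 KiB alignment
+ * and 4 KiB EFI pages, 15 extra pages are requested. The unused pages
+ * at the head and tail are freed again below.
+ */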
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ memory_type, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, align);
+
+ if (slack > 0) {
+ int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
new file mode 100644
index 0000000000..1073dd9475
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+ asm("mrs %0, cpsr" : "=r"(*cpsr));
+ if ((*cpsr & MODE_MASK) == HYP_MODE)
+ asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+ else
+ asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
+efi_status_t check_platform_features(void)
+{
+ efi_status_t status;
+ u32 cpsr, sctlr;
+ int block;
+
+ get_cpu_state(&cpsr, &sctlr);
+
+ efi_info("Entering in %s mode with MMU %sabled\n",
+ ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+ (sctlr & 1) ? "en" : "dis");
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*efi_entry_state),
+ (void **)&efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("allocate_pool() failed\n");
+ return status;
+ }
+
+ efi_entry_state->cpsr_before_ebs = cpsr;
+ efi_entry_state->sctlr_before_ebs = sctlr;
+
+ status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+ efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("install_configuration_table() failed\n");
+ goto free_state;
+ }
+
+ /* non-LPAE kernels can run anywhere */
+ if (!IS_ENABLED(CONFIG_ARM_LPAE))
+ return EFI_SUCCESS;
+
+ /* LPAE kernels need compatible hardware */
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block < 5) {
+ efi_err("This LPAE kernel is not supported by your CPU\n");
+ status = EFI_UNSUPPORTED;
+ goto drop_table;
+ }
+ return EFI_SUCCESS;
+
+drop_table:
+ efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+ efi_bs_call(free_pool, efi_entry_state);
+ return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+ get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+ &efi_entry_state->sctlr_after_ebs);
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ const int slack = TEXT_OFFSET - 5 * PAGE_SIZE;
+ int alloc_size = MAX_UNCOMP_KERNEL_SIZE + EFI_PHYS_ALIGN;
+ unsigned long alloc_base, kernel_base;
+ efi_status_t status;
+
+ /*
+ * Allocate space for the decompressed kernel as low as possible.
+ * The region should be 16 MiB aligned, but the first 'slack' bytes
+ * are not used by Linux, so we allow those to be occupied by the
+ * firmware.
+ */
+ status = efi_low_alloc_above(alloc_size, EFI_PAGE_SIZE, &alloc_base, 0x0);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for uncompressed kernel.\n");
+ return status;
+ }
+
+ if ((alloc_base % EFI_PHYS_ALIGN) > slack) {
+ /*
+ * More than 'slack' bytes are already occupied at the base of
+ * the allocation, so we need to advance to the next 16 MiB block.
+ */
+ kernel_base = round_up(alloc_base, EFI_PHYS_ALIGN);
+ efi_info("Free memory starts at 0x%lx, setting kernel_base to 0x%lx\n",
+ alloc_base, kernel_base);
+ } else {
+ kernel_base = round_down(alloc_base, EFI_PHYS_ALIGN);
+ }
+
+ *reserve_addr = kernel_base + slack;
+ *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
+
+ /* now free the parts that we will not use */
+ if (*reserve_addr > alloc_base) {
+ efi_bs_call(free_pages, alloc_base,
+ (*reserve_addr - alloc_base) / EFI_PAGE_SIZE);
+ alloc_size -= *reserve_addr - alloc_base;
+ }
+ efi_bs_call(free_pages, *reserve_addr + MAX_UNCOMP_KERNEL_SIZE,
+ (alloc_size - MAX_UNCOMP_KERNEL_SIZE) / EFI_PAGE_SIZE);
+
+ *image_addr = kernel_base + TEXT_OFFSET;
+ *image_size = 0;
+
+ efi_debug("image addr == 0x%lx, reserve_addr == 0x%lx\n",
+ *image_addr, *reserve_addr);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
new file mode 100644
index 0000000000..452b7ccd33
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ */
+
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/memory.h>
+#include <asm/sections.h>
+
+#include "efistub.h"
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ efi_status_t status;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
+
+ if (image->image_base != _text) {
+ efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+ image->image_base = _text;
+ }
+
+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+ SEGMENT_ALIGN >> 10);
+
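+ /*
+ * kernel_size covers the file image up to _edata, kernel_codesize the
+ * text/inittext region, and kernel_memsize additionally accounts for
+ * the in-memory footprint beyond _edata (BSS etc.).
+ */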
+ kernel_size = _edata - _text;
+ kernel_codesize = __inittext_end - _text;
+ kernel_memsize = kernel_size + (_end - _edata);
+ *reserve_size = kernel_memsize;
+ *image_addr = (unsigned long)_text;
+
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize,
+ kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return EFI_SUCCESS;
+}
+
+asmlinkage void primary_entry(void);
+
+unsigned long primary_entry_offset(void)
+{
+ /*
+ * When built as part of the kernel, the EFI stub cannot branch to the
+ * kernel proper via the image header, as the PE/COFF header is
+ * strictly not part of the in-memory presentation of the image, only
+ * of the file representation. So instead, we need to jump to the
+ * actual entrypoint in the .text region of the image.
+ */
+ return (char *)primary_entry - _text;
+}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ caches_clean_inval_pou(start, end);
+}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
new file mode 100644
index 0000000000..446e35eaf3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ */
+
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/image.h>
+#include <asm/memory.h>
+#include <asm/sysreg.h>
+
+#include "efistub.h"
+
+static bool system_needs_vamap(void)
+{
+ const struct efi_smbios_type4_record *record;
+ const u32 __aligned(1) *socid;
+ const u8 *version;
+
+ /*
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior. Most Altra systems
+ * can be identified by the SMCCC soc ID, which is conveniently exposed
+ * via the type 4 SMBIOS records. Otherwise, test the processor version
+ * field. eMAG systems all appear to have the processor version field
+ * set to "eMAG".
+ */
+ record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
+ if (!record)
+ return false;
+
+ socid = (u32 *)record->processor_id;
+ switch (*socid & 0xffff000f) {
+ static char const altra[] = "Ampere(TM) Altra(TM) Processor";
+ static char const emag[] = "eMAG";
+
+ default:
+ version = efi_get_smbios_string(&record->header, 4,
+ processor_version);
+ if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
+ strncmp(version, emag, sizeof(emag) - 1)))
+ break;
+
+ fallthrough;
+
+ case 0x0a160001: // Altra
+ case 0x0a160002: // Altra Max
+ efi_warn("Working around broken SetVirtualAddressMap()\n");
+ return true;
+ }
+
+ return false;
+}
+
+efi_status_t check_platform_features(void)
+{
+ u64 tg;
+
+ /*
+ * If we have 48 bits of VA space for TTBR0 mappings, we can map the
+ * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
+ * unnecessary.
+ */
+ if (VA_BITS_MIN >= 48 && !system_needs_vamap())
+ efi_novamap = true;
+
+ /* UEFI mandates support for 4 KB granularity, no need to check */
+ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return EFI_SUCCESS;
+
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
+ if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ efi_err("This 64 KB granular kernel is not supported by your CPU\n");
+ else
+ efi_err("This 16 KB granular kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+#define DCTYPE "civac"
+#else
+#define DCTYPE "cvau"
+#endif
+
+u32 __weak code_size;
+
+void efi_cache_sync_image(unsigned long image_base,
+ unsigned long alloc_size)
+{
+ u32 ctr = read_cpuid_effective_cachetype();
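+ /*
+ * CTR_EL0.DminLine is log2 of the smallest D-cache line size in words,
+ * so 4 << DminLine gives the line size in bytes.
+ */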
+ u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
+ CTR_EL0_DminLine_SHIFT);
+
+ /* only perform the cache maintenance if needed for I/D coherency */
+ if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
+ unsigned long base = image_base;
+ unsigned long size = code_size;
+
+ do {
+ asm("dc " DCTYPE ", %0" :: "r"(base));
+ base += lsize;
+ size -= lsize;
+ } while (size >= lsize);
+ }
+
+ asm("ic ialluis");
+ dsb(ish);
+ isb();
+
+ efi_remap_image(image_base, alloc_size, code_size);
+}
+
+unsigned long __weak primary_entry_offset(void)
+{
+ /*
+ * By default, we can invoke the kernel via the branch instruction in
+ * the image header, so offset #0. This will be overridden by the EFI
+ * stub build that is linked into the core kernel, as in that case, the
+ * image header may not have been loaded into memory, or may be mapped
+ * with non-executable permissions.
+ */
+ return 0;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+{
+ void (* __noreturn enter_kernel)(u64, u64, u64, u64);
+
+ enter_kernel = (void *)entrypoint + primary_entry_offset();
+ enter_kernel(fdt_addr, 0, 0, 0);
+}
diff --git a/drivers/firmware/efi/libstub/bitmap.c b/drivers/firmware/efi/libstub/bitmap.c
new file mode 100644
index 0000000000..5c9bba0d54
--- /dev/null
+++ b/drivers/firmware/efi/libstub/bitmap.c
@@ -0,0 +1,41 @@
+#include <linux/bitmap.h>
+
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
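+ /*
+ * Set the (possibly partial) first word, then any whole words in the
+ * middle, and finally trim the mask for a partial last word.
+ */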
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c
new file mode 100644
index 0000000000..2f1902e5d4
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub-entry.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static unsigned long screen_info_offset;
+
+struct screen_info *alloc_screen_info(void)
+{
+ if (IS_ENABLED(CONFIG_ARM))
+ return __alloc_screen_info();
+ return (void *)&screen_info + screen_info_offset;
+}
+
+/*
+ * EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
+ * LoongArch. This is the entrypoint that is described in the PE/COFF header
+ * of the core kernel.
+ */
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *systab)
+{
+ efi_loaded_image_t *image;
+ efi_status_t status;
+ unsigned long image_addr;
+ unsigned long image_size = 0;
+ /* address/size pairs for memory management */
+ char *cmdline_ptr = NULL;
+ efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
+ unsigned long reserve_addr = 0;
+ unsigned long reserve_size = 0;
+
+ WRITE_ONCE(efi_system_table, systab);
+
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ return EFI_INVALID_PARAMETER;
+
+ /*
+ * Get a handle to the loaded image protocol. This is used to get
+ * information about the running image, such as size and the command
+ * line.
+ */
+ status = efi_bs_call(handle_protocol, handle, &loaded_image_proto,
+ (void *)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get loaded image protocol\n");
+ return status;
+ }
+
+ status = efi_handle_cmdline(image, &cmdline_ptr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ efi_info("Booting Linux Kernel...\n");
+
+ status = handle_kernel_image(&image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ image, handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ return status;
+ }
+
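+ /*
+ * The kernel was copied to image_addr; record its displacement from
+ * the running stub image so alloc_screen_info() can return the
+ * screen_info instance inside the relocated copy.
+ */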
+ screen_info_offset = image_addr - (unsigned long)image->image_base;
+
+ status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
+
+ efi_free(image_size, image_addr);
+ efi_free(reserve_size, reserve_addr);
+
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
new file mode 100644
index 0000000000..bfa30625f5
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/stdarg.h>
+
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+#include "efistub.h"
+
+bool efi_nochunk;
+bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+bool efi_novamap;
+
+static bool efi_noinitrd;
+static bool efi_nosoftreserve;
+static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_nosoftreserve;
+}
+
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline: kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
+ * option, e.g. efi=nochunk.
+ *
+ * It should be noted that efi= is parsed in two very different
+ * environments, first in the early boot environment of the EFI boot
+ * stub, and subsequently during the kernel boot.
+ *
+ * Return: status code
+ */
+efi_status_t efi_parse_options(char const *cmdline)
+{
+ size_t len;
+ efi_status_t status;
+ char *str, *buf;
+
+ if (!cmdline)
+ return EFI_SUCCESS;
+
+ len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ memcpy(buf, cmdline, len - 1);
+ buf[len - 1] = '\0';
+ str = skip_spaces(buf);
+
+ while (*str) {
+ char *param, *val;
+
+ str = next_arg(str, &param, &val);
+ if (!val && !strcmp(param, "--"))
+ break;
+
+ if (!strcmp(param, "nokaslr")) {
+ efi_nokaslr = true;
+ } else if (!strcmp(param, "quiet")) {
+ efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ } else if (!strcmp(param, "noinitrd")) {
+ efi_noinitrd = true;
+ } else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
+ efi_no5lvl = true;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap |= parse_option_str(val, "novamap");
+
+ efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ parse_option_str(val, "nosoftreserve");
+
+ if (parse_option_str(val, "disable_early_pci_dma"))
+ efi_disable_pci_dma = true;
+ if (parse_option_str(val, "no_disable_early_pci_dma"))
+ efi_disable_pci_dma = false;
+ if (parse_option_str(val, "debug"))
+ efi_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ } else if (!strcmp(param, "video") &&
+ val && strstarts(val, "efifb:")) {
+ efi_parse_option_graphics(val + strlen("efifb:"));
+ }
+ }
+ efi_bs_call(free_pool, buf);
+ return EFI_SUCCESS;
+}
+
+/*
+ * The EFI_LOAD_OPTION descriptor has the following layout:
+ * u32 Attributes;
+ * u16 FilePathListLength;
+ * u16 Description[];
+ * efi_device_path_protocol_t FilePathList[];
+ * u8 OptionalData[];
+ *
+ * This function validates and unpacks the variable-size data fields.
+ */
+static
+bool efi_load_option_unpack(efi_load_option_unpacked_t *dest,
+ const efi_load_option_t *src, size_t size)
+{
+ const void *pos;
+ u16 c;
+ efi_device_path_protocol_t header;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+
+ if (size < offsetof(efi_load_option_t, variable_data))
+ return false;
+ pos = src->variable_data;
+ size -= offsetof(efi_load_option_t, variable_data);
+
+ if ((src->attributes & ~EFI_LOAD_OPTION_MASK) != 0)
+ return false;
+
+ /* Scan description. */
+ description = pos;
+ do {
+ if (size < sizeof(c))
+ return false;
+ c = *(const u16 *)pos;
+ pos += sizeof(c);
+ size -= sizeof(c);
+ } while (c != L'\0');
+
+ /* Scan file_path_list. */
+ file_path_list = pos;
+ do {
+ if (size < sizeof(header))
+ return false;
+ header = *(const efi_device_path_protocol_t *)pos;
+ if (header.length < sizeof(header))
+ return false;
+ if (size < header.length)
+ return false;
+ pos += header.length;
+ size -= header.length;
+ } while ((header.type != EFI_DEV_END_PATH && header.type != EFI_DEV_END_PATH2) ||
+ (header.sub_type != EFI_DEV_END_ENTIRE));
+ if (pos != (const void *)file_path_list + src->file_path_list_length)
+ return false;
+
+ dest->attributes = src->attributes;
+ dest->file_path_list_length = src->file_path_list_length;
+ dest->description = description;
+ dest->file_path_list = file_path_list;
+ dest->optional_data_size = size;
+ dest->optional_data = size ? pos : NULL;
+
+ return true;
+}
+
+/*
+ * At least some versions of Dell firmware pass the entire contents of the
+ * Boot#### variable, i.e. the EFI_LOAD_OPTION descriptor, rather than just the
+ * OptionalData field.
+ *
+ * Detect this case and extract OptionalData.
+ */
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size)
+{
+ const efi_load_option_t *load_option = *load_options;
+ efi_load_option_unpacked_t load_option_unpacked;
+
+ if (!IS_ENABLED(CONFIG_X86))
+ return;
+ if (!load_option)
+ return;
+ if (*load_options_size < sizeof(*load_option))
+ return;
+ if ((load_option->attributes & ~EFI_LOAD_OPTION_BOOT_MASK) != 0)
+ return;
+
+ if (!efi_load_option_unpack(&load_option_unpacked, load_option, *load_options_size))
+ return;
+
+ efi_warn_once(FW_BUG "LoadOptions is an EFI_LOAD_OPTION descriptor\n");
+ efi_warn_once(FW_BUG "Using OptionalData as a workaround\n");
+
+ *load_options = load_option_unpacked.optional_data;
+ *load_options_size = load_option_unpacked.optional_data_size;
+}
+
+enum efistub_event {
+ EFISTUB_EVT_INITRD,
+ EFISTUB_EVT_LOAD_OPTIONS,
+ EFISTUB_EVT_COUNT,
+};
+
+#define STR_WITH_SIZE(s) sizeof(s), s
+
+static const struct {
+ u32 pcr_index;
+ u32 event_id;
+ u32 event_data_len;
+ u8 event_data[52];
+} events[] = {
+ [EFISTUB_EVT_INITRD] = {
+ 9,
+ INITRD_EVENT_TAG_ID,
+ STR_WITH_SIZE("Linux initrd")
+ },
+ [EFISTUB_EVT_LOAD_OPTIONS] = {
+ 9,
+ LOAD_OPTIONS_EVENT_TAG_ID,
+ STR_WITH_SIZE("LOADED_IMAGE::LoadOptions")
+ },
+};
+
+static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
+ unsigned long load_size,
+ enum efistub_event event)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_tcg2_protocol_t *tcg2 = NULL;
+ efi_status_t status;
+
+ efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
+ if (tcg2) {
+ struct efi_measured_event {
+ efi_tcg2_event_t event_data;
+ efi_tcg2_tagged_event_t tagged_event;
+ u8 tagged_event_data[];
+ } *evt;
+ int size = sizeof(*evt) + events[event].event_data_len;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&evt);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ evt->event_data = (struct efi_tcg2_event){
+ .event_size = size,
+ .event_header.header_size = sizeof(evt->event_data.event_header),
+ .event_header.header_version = EFI_TCG2_EVENT_HEADER_VERSION,
+ .event_header.pcr_index = events[event].pcr_index,
+ .event_header.event_type = EV_EVENT_TAG,
+ };
+
+ evt->tagged_event = (struct efi_tcg2_tagged_event){
+ .tagged_event_id = events[event].event_id,
+ .tagged_event_data_size = events[event].event_data_len,
+ };
+
+ memcpy(evt->tagged_event_data, events[event].event_data,
+ events[event].event_data_len);
+
+ status = efi_call_proto(tcg2, hash_log_extend_event, 0,
+ load_addr, load_size, &evt->event_data);
+ efi_bs_call(free_pool, evt);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+ return EFI_SUCCESS;
+ }
+
+ return EFI_UNSUPPORTED;
+fail:
+ efi_warn("Failed to measure data for event %d: 0x%lx\n", event, status);
+ return status;
+}
+
+/*
+ * Convert the Unicode UEFI command line to UTF-8 to pass to the kernel.
+ * The size of the allocated buffer is returned in *cmd_line_len.
+ * Returns NULL on error.
+ */
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
+{
+ const efi_char16_t *options = efi_table_attr(image, load_options);
+ u32 options_size = efi_table_attr(image, load_options_size);
+ int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ unsigned long cmdline_addr = 0;
+ const efi_char16_t *s2;
+ bool in_quote = false;
+ efi_status_t status;
+ u32 options_chars;
+
+ if (options_size > 0)
+ efi_measure_tagged_event((unsigned long)options, options_size,
+ EFISTUB_EVT_LOAD_OPTIONS);
+
+ efi_apply_loadoptions_quirk((const void **)&options, &options_size);
+ options_chars = options_size / sizeof(efi_char16_t);
+
+ if (options) {
+ s2 = options;
+ while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
+ efi_char16_t c = *s2++;
+
+ if (c < 0x80) {
+ if (c == L'\0' || c == L'\n')
+ break;
+ if (c == L'"')
+ in_quote = !in_quote;
+ else if (!in_quote && isspace((char)c))
+ safe_options_bytes = options_bytes;
+
+ options_bytes++;
+ continue;
+ }
+
+ /*
+ * Get the number of UTF-8 bytes corresponding to a
+ * UTF-16 character.
+ * The first part handles everything in the BMP.
+ */
+ options_bytes += 2 + (c >= 0x800);
+ /*
+ * Add one more byte for valid surrogate pairs. Invalid
+ * surrogates will be replaced with 0xfffd and take up
+ * only 3 bytes.
+ */
+ if ((c & 0xfc00) == 0xd800) {
+ /*
+ * If the very last word is a high surrogate,
+ * we must ignore it since we can't access the
+ * low surrogate.
+ */
+ if (!options_chars) {
+ options_bytes -= 3;
+ } else if ((*s2 & 0xfc00) == 0xdc00) {
+ options_bytes++;
+ options_chars--;
+ s2++;
+ }
+ }
+ }
+ if (options_bytes >= COMMAND_LINE_SIZE) {
+ options_bytes = safe_options_bytes;
+ efi_err("Command line is too long: truncated to %d bytes\n",
+ options_bytes);
+ }
+ }
+
+ options_bytes++; /* NUL termination */
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, options_bytes,
+ (void **)&cmdline_addr);
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
+ options_bytes - 1, options);
+
+ *cmd_line_len = options_bytes;
+ return (char *)cmdline_addr;
+}
+
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle: handle of the exiting image
+ * @priv: argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A client
+ * specific structure may be passed to the function via priv. The client
+ * function may be called multiple times.
+ *
+ * Return: status code
+ */
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ efi_exit_boot_map_processing priv_func)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
+ status = efi_get_memory_map(&map, true);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = priv_func(map, priv);
+ if (status != EFI_SUCCESS) {
+ efi_bs_call(free_pool, map);
+ return status;
+ }
+
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
+
+ if (status == EFI_INVALID_PARAMETER) {
+ /*
+ * The memory map changed between efi_get_memory_map() and
+ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+ * updated map, and try again. The spec implies one retry
+ * should be sufficient, which is confirmed against the EDK2
+ * implementation. Per the spec, we can only invoke
+ * get_memory_map() and exit_boot_services() - we cannot alloc
+ * so efi_get_memory_map() cannot be used, and we must reuse
+ * the buffer. For all practical purposes, the headroom in the
+ * buffer should account for any changes in the map so the call
+ * to get_memory_map() is expected to succeed here.
+ */
+ map->map_size = map->buff_size;
+ status = efi_bs_call(get_memory_map,
+ &map->map_size,
+ &map->map,
+ &map->map_key,
+ &map->desc_size,
+ &map->desc_ver);
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = priv_func(map, priv);
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
+ }
+
+ return status;
+}
+
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid: GUID of the configuration table to be retrieved
+ * Return: pointer to the configuration table or NULL
+ */
+void *get_efi_config_table(efi_guid_t guid)
+{
+ unsigned long tables = efi_table_attr(efi_system_table, tables);
+ int nr_tables = efi_table_attr(efi_system_table, nr_tables);
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_config_table_t *t = (void *)tables;
+
+ if (efi_guidcmp(t->guid, guid) == 0)
+ return efi_table_attr(t, table);
+
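+ /* 32-bit firmware lays out config table entries with 32-bit pointers */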
+ tables += efi_is_native() ? sizeof(efi_config_table_t)
+ : sizeof(efi_config_table_32_t);
+ }
+ return NULL;
+}
+
+/*
+ * The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way
+ * for the firmware or bootloader to expose the initrd data directly to the stub
+ * via the trivial LoadFile2 protocol, which is defined in the UEFI spec, and is
+ * very easy to implement. It is a simple Linux initrd specific conduit between
+ * kernel and firmware, allowing us to put the EFI stub (being part of the
+ * kernel) in charge of where and when to load the initrd, while leaving it up
+ * to the firmware to decide whether it needs to expose its filesystem hierarchy
+ * via EFI protocols.
+ */
+static const struct {
+ struct efi_vendor_dev_path vendor;
+ struct efi_generic_dev_path end;
+} __packed initrd_dev_path = {
+ {
+ {
+ EFI_DEV_MEDIA,
+ EFI_DEV_MEDIA_VENDOR,
+ sizeof(struct efi_vendor_dev_path),
+ },
+ LINUX_EFI_INITRD_MEDIA_GUID
+ }, {
+ EFI_DEV_END_PATH,
+ EFI_DEV_END_ENTIRE,
+ sizeof(struct efi_generic_dev_path)
+ }
+};
+
+/**
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
+ * @initrd: pointer to the struct that receives the base address and size of
+ * the loaded initrd
+ * @max: upper limit for the initrd memory allocation
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ * case @initrd->base and @initrd->size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
+ */
+static
+efi_status_t efi_load_initrd_dev_path(struct linux_efi_initrd *initrd,
+ unsigned long max)
+{
+ efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID;
+ efi_device_path_protocol_t *dp;
+ efi_load_file2_protocol_t *lf2;
+ efi_handle_t handle;
+ efi_status_t status;
+
+ dp = (efi_device_path_protocol_t *)&initrd_dev_path;
+ status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(handle_protocol, handle, &lf2_proto_guid,
+ (void **)&lf2);
+ if (status != EFI_SUCCESS)
+ return status;
+
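+ /* a first call with a zero-sized buffer merely queries the required size */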
+ initrd->size = 0;
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ status = efi_allocate_pages(initrd->size, &initrd->base, max);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size,
+ (void *)initrd->base);
+ if (status != EFI_SUCCESS) {
+ efi_free(initrd->size, initrd->base);
+ return EFI_LOAD_ERROR;
+ }
+ return EFI_SUCCESS;
+}
+
+static
+efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
+ struct linux_efi_initrd *initrd,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ if (image == NULL)
+ return EFI_UNSUPPORTED;
+
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit,
+ &initrd->base, &initrd->size);
+}
+
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image: EFI loaded image protocol
+ * @soft_limit: preferred address for loading the initrd
+ * @hard_limit: upper limit address for loading the initrd
+ * @out: if not NULL, set to the address of the installed initrd
+ * configuration table when an initrd was loaded
+ *
+ * Return: status code
+ */
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out)
+{
+ efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID;
+ efi_status_t status = EFI_SUCCESS;
+ struct linux_efi_initrd initrd, *tbl;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || efi_noinitrd)
+ return EFI_SUCCESS;
+
+ status = efi_load_initrd_dev_path(&initrd, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
+ hard_limit);
+ /* command line loader disabled or no initrd= passed? */
+ if (status == EFI_UNSUPPORTED || status == EFI_NOT_READY)
+ return EFI_SUCCESS;
+ if (status == EFI_SUCCESS)
+ efi_info("Loaded initrd from command line option\n");
+ }
+ if (status != EFI_SUCCESS)
+ goto failed;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
+ (void **)&tbl);
+ if (status != EFI_SUCCESS)
+ goto free_initrd;
+
+ *tbl = initrd;
+ status = efi_bs_call(install_configuration_table, &tbl_guid, tbl);
+ if (status != EFI_SUCCESS)
+ goto free_tbl;
+
+ if (out)
+ *out = tbl;
+ return EFI_SUCCESS;
+
+free_tbl:
+ efi_bs_call(free_pool, tbl);
+free_initrd:
+ efi_free(initrd.size, initrd.base);
+failed:
+ efi_err("Failed to load initrd: 0x%lx\n", status);
+ return status;
+}
+
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec: number of microseconds to wait for key stroke
+ * @key: key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return: status code, EFI_SUCCESS if key received
+ */
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
+{
+ efi_event_t events[2], timer;
+ unsigned long index;
+ efi_simple_text_input_protocol_t *con_in;
+ efi_status_t status;
+
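+ /* wait on two events: the console's wait_for_key event and a one-shot timer */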
+ con_in = efi_table_attr(efi_system_table, con_in);
+ if (!con_in)
+ return EFI_UNSUPPORTED;
+ efi_set_event_at(events, 0, efi_table_attr(con_in, wait_for_key));
+
+ status = efi_bs_call(create_event, EFI_EVT_TIMER, 0, NULL, NULL, &timer);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(set_timer, timer, EfiTimerRelative,
+ EFI_100NSEC_PER_USEC * usec);
+ if (status != EFI_SUCCESS)
+ return status;
+ efi_set_event_at(events, 1, timer);
+
+ status = efi_bs_call(wait_for_event, 2, events, &index);
+ if (status == EFI_SUCCESS) {
+ if (index == 0)
+ status = efi_call_proto(con_in, read_keystroke, key);
+ else
+ status = EFI_TIMEOUT;
+ }
+
+ efi_bs_call(close_event, timer);
+
+ return status;
+}
+
+/**
+ * efi_remap_image() - Remap a loaded image with the appropriate permissions
+ * for code and data
+ *
+ * @image_base: the base of the image in memory
+ * @alloc_size: the size of the area in memory occupied by the image
+ * @code_size: the size of the leading part of the image containing code
+ * and read-only data
+ *
+ * efi_remap_image() uses the EFI memory attribute protocol to remap the code
+ * region of the loaded image read-only/executable, and the remainder
+ * read-write/non-executable. The code region is assumed to start at the base
+ * of the image, and will therefore cover the PE/COFF header as well.
+ */
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size)
+{
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ efi_memory_attribute_protocol_t *memattr;
+ efi_status_t status;
+ u64 attr;
+
+ /*
+ * If the firmware implements the EFI_MEMORY_ATTRIBUTE_PROTOCOL, let's
+ * invoke it to remap the text/rodata region of the decompressed image
+ * as read-only and the data/bss region as non-executable.
+ */
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+ if (status != EFI_SUCCESS)
+ return;
+
+ // Get the current attributes for the entire region
+ status = memattr->get_memory_attributes(memattr, image_base,
+ alloc_size, &attr);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to retrieve memory attributes for image region: 0x%lx\n",
+ status);
+ return;
+ }
+
+ // Mark the code region as read-only
+ status = memattr->set_memory_attributes(memattr, image_base, code_size,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to remap code region read-only\n");
+ return;
+ }
+
+ // If the entire region was already mapped as non-exec, clear the
+ // attribute from the code region. Otherwise, set it on the data
+ // region.
+ if (attr & EFI_MEMORY_XP) {
+ status = memattr->clear_memory_attributes(memattr, image_base,
+ code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap code region executable\n");
+ } else {
+ status = memattr->set_memory_attributes(memattr,
+ image_base + code_size,
+ alloc_size - code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap data region non-executable\n");
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
new file mode 100644
index 0000000000..f9c1e8a2bd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EFI stub implementation that is shared by arm and arm64 architectures.
+ * This should be #included by the EFI stub implementation files.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ * Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Mark Salter <msalter@redhat.com>
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * This is the base address at which to start allocating virtual memory ranges
+ * for UEFI Runtime Services.
+ *
+ * For ARM/ARM64:
+ * This is in the low TTBR0 range so that we can use
+ * any allocation we choose, and eliminate the risk of a conflict after kexec.
+ * The value chosen is the largest non-zero power of 2 suitable for this purpose
+ * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
+ * be mapped efficiently.
+ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
+ * map everything below 1 GB. (512 MB is a reasonable upper bound for the
+ * entire footprint of the UEFI runtime services memory regions)
+ *
+ * For RISC-V:
+ * There is no specific reason why this address (512 MB) can't also serve as
+ * the EFI runtime virtual address base on RISC-V, and it works for both RV32
+ * and RV64. Keep the same runtime virtual address as the other architectures
+ * to minimize code churn.
+ */
+#define EFI_RT_VIRTUAL_BASE SZ_512M
+
+/*
+ * Some architectures map the EFI regions into the kernel's linear map using a
+ * fixed offset.
+ */
+#ifndef EFI_RT_VIRTUAL_OFFSET
+#define EFI_RT_VIRTUAL_OFFSET 0
+#endif
+
+static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
+
+void __weak free_screen_info(struct screen_info *si)
+{
+}
+
+static struct screen_info *setup_graphics(void)
+{
+ efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ struct screen_info *si = NULL;
+
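+ /*
+ * A zero-sized locate_handle() query returns EFI_BUFFER_TOO_SMALL only
+ * if at least one GOP handle exists, so use that as the trigger for
+ * setting up screen_info.
+ */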
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+ status = efi_setup_gop(si, &gop_proto, size);
+ if (status != EFI_SUCCESS) {
+ free_screen_info(si);
+ return NULL;
+ }
+ }
+ return si;
+}
+
+static void install_memreserve_table(void)
+{
+ struct linux_efi_memreserve *rsv;
+ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+ efi_status_t status;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memreserve entry!\n");
+ return;
+ }
+
+ rsv->next = 0;
+ rsv->size = 0;
+ atomic_set(&rsv->count, 0);
+
+ status = efi_bs_call(install_configuration_table,
+ &memreserve_table_guid, rsv);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to install memreserve config table!\n");
+}
+
+static u32 get_supported_rt_services(void)
+{
+ const efi_rt_properties_table_t *rt_prop_table;
+ u32 supported = EFI_RT_SUPPORTED_ALL;
+
+ rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
+ if (rt_prop_table)
+ supported &= rt_prop_table->runtime_services_supported;
+
+ return supported;
+}
+
+efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
+{
+ int cmdline_size = 0;
+ efi_status_t status;
+ char *cmdline;
+
+ /*
+ * Get the command line from EFI, using the LOADED_IMAGE
+ * protocol. We are going to copy the command line into the
+ * device tree, so this can be allocated anywhere.
+ */
+ cmdline = efi_convert_cmdline(image, &cmdline_size);
+ if (!cmdline) {
+ efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline_size == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
+ status = efi_parse_options(cmdline);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
+
+ *cmdline_ptr = cmdline;
+ return EFI_SUCCESS;
+
+fail_free_cmdline:
+ efi_bs_call(free_pool, cmdline);
+ return status;
+}
+
+efi_status_t efi_stub_common(efi_handle_t handle,
+ efi_loaded_image_t *image,
+ unsigned long image_addr,
+ char *cmdline_ptr)
+{
+ struct screen_info *si;
+ efi_status_t status;
+
+ status = check_platform_features();
+ if (status != EFI_SUCCESS)
+ return status;
+
+ si = setup_graphics();
+
+ efi_retrieve_tpm2_eventlog();
+
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation();
+
+ efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
+ NULL);
+
+ efi_random_get_seed();
+
+ /* force efi_novamap if SetVirtualAddressMap() is unsupported */
+ efi_novamap |= !(get_supported_rt_services() &
+ EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
+
+ install_memreserve_table();
+
+ status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);
+
+ free_screen_info(si);
+ return status;
+}
+
+/*
+ * efi_allocate_virtmap() - create a pool allocation for the virtmap
+ *
+ * Create an allocation that is of sufficient size to hold all the memory
+ * descriptors that will be passed to SetVirtualAddressMap() to inform the
+ * firmware about the virtual mapping that will be used under the OS to call
+ * into the firmware.
+ */
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver)
+{
+ unsigned long size, mmap_key;
+ efi_status_t status;
+
+ /*
+ * Use the size of the current memory map as an upper bound for the
+ * size of the buffer we need to pass to SetVirtualAddressMap() to
+ * cover all EFI_MEMORY_RUNTIME regions.
+ */
+ size = 0;
+ status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size,
+ desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)virtmap);
+}
+
+/*
+ * efi_get_virtmap() - create a virtual mapping for the EFI memory map
+ *
+ * This function populates the virt_addr fields of all memory region descriptors
+ * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
+ * are also copied to @runtime_map, and their total count is returned in @count.
+ */
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count)
+{
+ u64 efi_virt_base = virtmap_base;
+ efi_memory_desc_t *in, *out = runtime_map;
+ int l;
+
+ *count = 0;
+
+ for (l = 0; l < map_size; l += desc_size) {
+ u64 paddr, size;
+
+ in = (void *)memory_map + l;
+ if (!(in->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
+ in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET;
+ if (efi_novamap) {
+ continue;
+ }
+
+ /*
+ * Make the mapping compatible with 64k pages: this allows
+ * a 4k page size kernel to kexec a 64k page size kernel and
+ * vice versa.
+ */
+ if (!flat_va_mapping) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+
+ in->virt_addr += efi_virt_base - paddr;
+ efi_virt_base += size;
+ }
+
+ memcpy(out, in, desc_size);
+ out = (void *)out + desc_size;
+ ++*count;
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
new file mode 100644
index 0000000000..212687c30d
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -0,0 +1,1154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+
+#include <linux/compiler.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/kern_levels.h>
+#include <linux/types.h>
+#include <asm/efi.h>
+
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64) by
+ * renaming the sections, in which case a redundant __init annotation would
+ * produce section names like .init.init.text, which our linker script does
+ * not expect.
+ */
+#undef __init
+
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
+#ifndef EFI_ALLOC_LIMIT
+#define EFI_ALLOC_LIMIT ULONG_MAX
+#endif
+
+extern bool efi_no5lvl;
+extern bool efi_nochunk;
+extern bool efi_nokaslr;
+extern int efi_loglevel;
+extern bool efi_novamap;
+
+extern const efi_system_table_t *efi_system_table;
+
+typedef union efi_dxe_services_table efi_dxe_services_table_t;
+extern const efi_dxe_services_table_t *efi_dxe_table;
+
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg);
+
+#ifndef ARCH_HAS_EFISTUB_WRAPPERS
+
+#define efi_is_native() (true)
+#define efi_table_attr(inst, attr) (inst)->attr
+#define efi_fn_call(inst, func, ...) (inst)->func(__VA_ARGS__)
+
+#endif
+
+#define efi_call_proto(inst, func, ...) ({ \
+ __typeof__(inst) __inst = (inst); \
+ efi_fn_call(__inst, func, __inst, ##__VA_ARGS__); \
+})
+#define efi_bs_call(func, ...) \
+ efi_fn_call(efi_table_attr(efi_system_table, boottime), func, ##__VA_ARGS__)
+#define efi_rt_call(func, ...) \
+ efi_fn_call(efi_table_attr(efi_system_table, runtime), func, ##__VA_ARGS__)
+#define efi_dxe_call(func, ...) \
+ efi_fn_call(efi_dxe_table, func, ##__VA_ARGS__)
+
+#define efi_info(fmt, ...) \
+ efi_printk(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn(fmt, ...) \
+ efi_printk(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+#define efi_err(fmt, ...) \
+ efi_printk(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug(fmt, ...) \
+ efi_printk(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+
+#define efi_printk_once(fmt, ...) \
+({ \
+ static bool __print_once; \
+ bool __ret_print_once = !__print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ efi_printk(fmt, ##__VA_ARGS__); \
+ } \
+ __ret_print_once; \
+})
+
+#define efi_info_once(fmt, ...) \
+ efi_printk_once(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn_once(fmt, ...) \
+ efi_printk_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+#define efi_err_once(fmt, ...) \
+ efi_printk_once(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug_once(fmt, ...) \
+ efi_printk_once(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+ fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+ fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
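+/* handle arrays produced by 32-bit firmware hold 32-bit pointers; widen on access */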
+#define efi_get_handle_at(array, idx) \
+ (efi_is_native() ? (array)[idx] \
+ : (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
+
+#define efi_get_handle_num(size) \
+ ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
+
+#define for_each_efi_handle(handle, array, size, i) \
+ for (i = 0; \
+ i < efi_get_handle_num(size) && \
+ ((handle = efi_get_handle_at((array), i)) || true); \
+ i++)
+
+static inline
+void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
+{
+ *lo = lower_32_bits(data);
+ *hi = upper_32_bits(data);
+}
+
+/*
+ * Allocation types for calls to boottime->allocate_pages.
+ */
+#define EFI_ALLOCATE_ANY_PAGES 0
+#define EFI_ALLOCATE_MAX_ADDRESS 1
+#define EFI_ALLOCATE_ADDRESS 2
+#define EFI_MAX_ALLOCATE_TYPE 3
+
+/*
+ * The type of search to perform when calling boottime->locate_handle
+ */
+#define EFI_LOCATE_ALL_HANDLES 0
+#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
+#define EFI_LOCATE_BY_PROTOCOL 2
+
+/*
+ * boottime->stall takes the time period in microseconds
+ */
+#define EFI_USEC_PER_SEC 1000000
+
+/*
+ * boottime->set_timer takes the time in 100ns units
+ */
+#define EFI_100NSEC_PER_USEC ((u64)10)
+
+/*
+ * An efi_boot_memmap is used by efi_get_memory_map() to return the
+ * EFI memory map in a dynamically allocated buffer.
+ *
+ * The buffer allocated for the EFI memory map includes extra room for
+ * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors.
+ * This facilitates the reuse of the EFI memory map buffer when a second
+ * call to ExitBootServices() is needed because of intervening changes to
+ * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+ * to factor in this headroom requirement as well.
+ */
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
+typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+
+union efi_device_path_to_text_protocol {
+ struct {
+ efi_char16_t *(__efiapi *convert_device_node_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ efi_char16_t *(__efiapi *convert_device_path_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ };
+ struct {
+ u32 convert_device_node_to_text;
+ u32 convert_device_path_to_text;
+ } mixed_mode;
+};
+
+typedef union efi_device_path_to_text_protocol efi_device_path_to_text_protocol_t;
+
+union efi_device_path_from_text_protocol {
+ struct {
+ efi_device_path_protocol_t *
+ (__efiapi *convert_text_to_device_node)(const efi_char16_t *);
+ efi_device_path_protocol_t *
+ (__efiapi *convert_text_to_device_path)(const efi_char16_t *);
+ };
+ struct {
+ u32 convert_text_to_device_node;
+ u32 convert_text_to_device_path;
+ } mixed_mode;
+};
+
+typedef union efi_device_path_from_text_protocol efi_device_path_from_text_protocol_t;
+
+typedef void *efi_event_t;
+/* Note that notifications won't work in mixed mode */
+typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
+
+#define EFI_EVT_TIMER 0x80000000U
+#define EFI_EVT_RUNTIME 0x40000000U
+#define EFI_EVT_NOTIFY_WAIT 0x00000100U
+#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
+
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events: array of UEFI events
+ * @idx: index at which to put the event in the array
+ * @event: event to add to the array
+ *
+ * boottime->wait_for_event() takes an array of events as input.
+ * Provide a helper to set it up correctly for mixed mode.
+ */
+static inline
+void efi_set_event_at(efi_event_t *events, size_t idx, efi_event_t event)
+{
+ if (efi_is_native())
+ events[idx] = event;
+ else
+ ((u32 *)events)[idx] = (u32)(unsigned long)event;
+}
+
+#define EFI_TPL_APPLICATION 4
+#define EFI_TPL_CALLBACK 8
+#define EFI_TPL_NOTIFY 16
+#define EFI_TPL_HIGH_LEVEL 31
+
+typedef enum {
+ EfiTimerCancel,
+ EfiTimerPeriodic,
+ EfiTimerRelative
+} EFI_TIMER_DELAY;
+
+/*
+ * EFI Boot Services table
+ */
+union efi_boot_services {
+ struct {
+ efi_table_hdr_t hdr;
+ void *raise_tpl;
+ void *restore_tpl;
+ efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
+ unsigned long);
+ efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
+ unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
+ void **);
+ efi_status_t (__efiapi *free_pool)(void *);
+ efi_status_t (__efiapi *create_event)(u32, unsigned long,
+ efi_event_notify_t, void *,
+ efi_event_t *);
+ efi_status_t (__efiapi *set_timer)(efi_event_t,
+ EFI_TIMER_DELAY, u64);
+ efi_status_t (__efiapi *wait_for_event)(unsigned long,
+ efi_event_t *,
+ unsigned long *);
+ void *signal_event;
+ efi_status_t (__efiapi *close_event)(efi_event_t);
+ void *check_event;
+ void *install_protocol_interface;
+ void *reinstall_protocol_interface;
+ void *uninstall_protocol_interface;
+ efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
+ efi_guid_t *, void **);
+ void *__reserved;
+ void *register_protocol_notify;
+ efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t *);
+ efi_status_t (__efiapi *locate_device_path)(efi_guid_t *,
+ efi_device_path_protocol_t **,
+ efi_handle_t *);
+ efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
+ void *);
+ efi_status_t (__efiapi *load_image)(bool, efi_handle_t,
+ efi_device_path_protocol_t *,
+ void *, unsigned long,
+ efi_handle_t *);
+ efi_status_t (__efiapi *start_image)(efi_handle_t, unsigned long *,
+ efi_char16_t **);
+ efi_status_t __noreturn (__efiapi *exit)(efi_handle_t,
+ efi_status_t,
+ unsigned long,
+ efi_char16_t *);
+ efi_status_t (__efiapi *unload_image)(efi_handle_t);
+ efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
+ unsigned long);
+ void *get_next_monotonic_count;
+ efi_status_t (__efiapi *stall)(unsigned long);
+ void *set_watchdog_timer;
+ void *connect_controller;
+ efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
+ efi_handle_t,
+ efi_handle_t);
+ void *open_protocol;
+ void *close_protocol;
+ void *open_protocol_information;
+ void *protocols_per_handle;
+ void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
+ void **);
+ efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
+ efi_status_t (__efiapi *uninstall_multiple_protocol_interfaces)(efi_handle_t, ...);
+ void *calculate_crc32;
+ void (__efiapi *copy_mem)(void *, const void *, unsigned long);
+ void (__efiapi *set_mem)(void *, unsigned long, unsigned char);
+ void *create_event_ex;
+ };
+ struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+ } mixed_mode;
+};
+
+typedef enum {
+ EfiGcdMemoryTypeNonExistent,
+ EfiGcdMemoryTypeReserved,
+ EfiGcdMemoryTypeSystemMemory,
+ EfiGcdMemoryTypeMemoryMappedIo,
+ EfiGcdMemoryTypePersistent,
+ EfiGcdMemoryTypeMoreReliable,
+ EfiGcdMemoryTypeMaximum
+} efi_gcd_memory_type_t;
+
+typedef struct {
+ efi_physical_addr_t base_address;
+ u64 length;
+ u64 capabilities;
+ u64 attributes;
+ efi_gcd_memory_type_t gcd_memory_type;
+ void *image_handle;
+ void *device_handle;
+} efi_gcd_memory_space_desc_t;
+
+/*
+ * EFI DXE Services table
+ */
+union efi_dxe_services_table {
+ struct {
+ efi_table_hdr_t hdr;
+ void *add_memory_space;
+ void *allocate_memory_space;
+ void *free_memory_space;
+ void *remove_memory_space;
+ efi_status_t (__efiapi *get_memory_space_descriptor)(efi_physical_addr_t,
+ efi_gcd_memory_space_desc_t *);
+ efi_status_t (__efiapi *set_memory_space_attributes)(efi_physical_addr_t,
+ u64, u64);
+ void *get_memory_space_map;
+ void *add_io_space;
+ void *allocate_io_space;
+ void *free_io_space;
+ void *remove_io_space;
+ void *get_io_space_descriptor;
+ void *get_io_space_map;
+ void *dispatch;
+ void *schedule;
+ void *trust;
+ void *process_firmware_volume;
+ void *set_memory_space_capabilities;
+ };
+ struct {
+ efi_table_hdr_t hdr;
+ u32 add_memory_space;
+ u32 allocate_memory_space;
+ u32 free_memory_space;
+ u32 remove_memory_space;
+ u32 get_memory_space_descriptor;
+ u32 set_memory_space_attributes;
+ u32 get_memory_space_map;
+ u32 add_io_space;
+ u32 allocate_io_space;
+ u32 free_io_space;
+ u32 remove_io_space;
+ u32 get_io_space_descriptor;
+ u32 get_io_space_map;
+ u32 dispatch;
+ u32 schedule;
+ u32 trust;
+ u32 process_firmware_volume;
+ u32 set_memory_space_capabilities;
+ } mixed_mode;
+};
+
+typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
+
+union efi_memory_attribute_protocol {
+ struct {
+ efi_status_t (__efiapi *get_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
+
+ efi_status_t (__efiapi *set_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+
+ efi_status_t (__efiapi *clear_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+ };
+ struct {
+ u32 get_memory_attributes;
+ u32 set_memory_attributes;
+ u32 clear_memory_attributes;
+ } mixed_mode;
+};
+
+typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
+
+union efi_uga_draw_protocol {
+ struct {
+ efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *,
+ u32*, u32*, u32*, u32*);
+ void *set_mode;
+ void *blt;
+ };
+ struct {
+ u32 get_mode;
+ u32 set_mode;
+ u32 blt;
+ } mixed_mode;
+};
+
+typedef struct {
+ u16 scan_code;
+ efi_char16_t unicode_char;
+} efi_input_key_t;
+
+union efi_simple_text_input_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *read_keystroke)(efi_simple_text_input_protocol_t *,
+ efi_input_key_t *);
+ efi_event_t wait_for_key;
+ };
+ struct {
+ u32 reset;
+ u32 read_keystroke;
+ u32 wait_for_key;
+ } mixed_mode;
+};
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key);
+
+union efi_simple_text_output_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
+ efi_char16_t *);
+ void *test_string;
+ };
+ struct {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+ } mixed_mode;
+};
+
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
+#define PIXEL_BIT_MASK 2
+#define PIXEL_BLT_ONLY 3
+#define PIXEL_FORMAT_MAX 4
+
+typedef struct {
+ u32 red_mask;
+ u32 green_mask;
+ u32 blue_mask;
+ u32 reserved_mask;
+} efi_pixel_bitmask_t;
+
+typedef struct {
+ u32 version;
+ u32 horizontal_resolution;
+ u32 vertical_resolution;
+ int pixel_format;
+ efi_pixel_bitmask_t pixel_information;
+ u32 pixels_per_scan_line;
+} efi_graphics_output_mode_info_t;
+
+typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
+
+union efi_graphics_output_protocol_mode {
+ struct {
+ u32 max_mode;
+ u32 mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long size_of_info;
+ efi_physical_addr_t frame_buffer_base;
+ unsigned long frame_buffer_size;
+ };
+ struct {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+ } mixed_mode;
+};
+
+typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
+
+union efi_graphics_output_protocol {
+ struct {
+ efi_status_t (__efiapi *query_mode)(efi_graphics_output_protocol_t *,
+ u32, unsigned long *,
+ efi_graphics_output_mode_info_t **);
+ efi_status_t (__efiapi *set_mode) (efi_graphics_output_protocol_t *, u32);
+ void *blt;
+ efi_graphics_output_protocol_mode_t *mode;
+ };
+ struct {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+ } mixed_mode;
+};
+
+typedef union {
+ struct {
+ u32 revision;
+ efi_handle_t parent_handle;
+ efi_system_table_t *system_table;
+ efi_handle_t device_handle;
+ void *file_path;
+ void *reserved;
+ u32 load_options_size;
+ void *load_options;
+ void *image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ efi_status_t (__efiapi *unload)(efi_handle_t image_handle);
+ };
+ struct {
+ u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ u32 image_code_type;
+ u32 image_data_type;
+ u32 unload;
+ } mixed_mode;
+} efi_loaded_image_t;
+
+typedef struct {
+ u64 size;
+ u64 file_size;
+ u64 phys_size;
+ efi_time_t create_time;
+ efi_time_t last_access_time;
+ efi_time_t modification_time;
+ __aligned_u64 attribute;
+ efi_char16_t filename[];
+} efi_file_info_t;
+
+typedef union efi_file_protocol efi_file_protocol_t;
+
+union efi_file_protocol {
+ struct {
+ u64 revision;
+ efi_status_t (__efiapi *open) (efi_file_protocol_t *,
+ efi_file_protocol_t **,
+ efi_char16_t *, u64,
+ u64);
+ efi_status_t (__efiapi *close) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *delete) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *read) (efi_file_protocol_t *,
+ unsigned long *,
+ void *);
+ efi_status_t (__efiapi *write) (efi_file_protocol_t *,
+ unsigned long, void *);
+ efi_status_t (__efiapi *get_position)(efi_file_protocol_t *,
+ u64 *);
+ efi_status_t (__efiapi *set_position)(efi_file_protocol_t *,
+ u64);
+ efi_status_t (__efiapi *get_info) (efi_file_protocol_t *,
+ efi_guid_t *,
+ unsigned long *,
+ void *);
+ efi_status_t (__efiapi *set_info) (efi_file_protocol_t *,
+ efi_guid_t *,
+ unsigned long,
+ void *);
+ efi_status_t (__efiapi *flush) (efi_file_protocol_t *);
+ };
+ struct {
+ u64 revision;
+ u32 open;
+ u32 close;
+ u32 delete;
+ u32 read;
+ u32 write;
+ u32 get_position;
+ u32 set_position;
+ u32 get_info;
+ u32 set_info;
+ u32 flush;
+ } mixed_mode;
+};
+
+typedef union efi_simple_file_system_protocol efi_simple_file_system_protocol_t;
+
+union efi_simple_file_system_protocol {
+ struct {
+ u64 revision;
+ efi_status_t (__efiapi *open_volume)(efi_simple_file_system_protocol_t *,
+ efi_file_protocol_t **);
+ };
+ struct {
+ u64 revision;
+ u32 open_volume;
+ } mixed_mode;
+};
+
+#define EFI_FILE_MODE_READ 0x0000000000000001
+#define EFI_FILE_MODE_WRITE 0x0000000000000002
+#define EFI_FILE_MODE_CREATE 0x8000000000000000
+
+typedef enum {
+ EfiPciIoWidthUint8,
+ EfiPciIoWidthUint16,
+ EfiPciIoWidthUint32,
+ EfiPciIoWidthUint64,
+ EfiPciIoWidthFifoUint8,
+ EfiPciIoWidthFifoUint16,
+ EfiPciIoWidthFifoUint32,
+ EfiPciIoWidthFifoUint64,
+ EfiPciIoWidthFillUint8,
+ EfiPciIoWidthFillUint16,
+ EfiPciIoWidthFillUint32,
+ EfiPciIoWidthFillUint64,
+ EfiPciIoWidthMaximum
+} EFI_PCI_IO_PROTOCOL_WIDTH;
+
+typedef enum {
+ EfiPciIoAttributeOperationGet,
+ EfiPciIoAttributeOperationSet,
+ EfiPciIoAttributeOperationEnable,
+ EfiPciIoAttributeOperationDisable,
+ EfiPciIoAttributeOperationSupported,
+ EfiPciIoAttributeOperationMaximum
+} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
+
+typedef
+efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
+ EFI_PCI_IO_PROTOCOL_WIDTH,
+ u32 offset,
+ unsigned long count,
+ void *buffer);
+
+typedef struct {
+ void *read;
+ void *write;
+} efi_pci_io_protocol_access_t;
+
+typedef struct {
+ efi_pci_io_protocol_cfg_t read;
+ efi_pci_io_protocol_cfg_t write;
+} efi_pci_io_protocol_config_access_t;
+
+union efi_pci_io_protocol {
+ struct {
+ void *poll_mem;
+ void *poll_io;
+ efi_pci_io_protocol_access_t mem;
+ efi_pci_io_protocol_access_t io;
+ efi_pci_io_protocol_config_access_t pci;
+ void *copy_mem;
+ void *map;
+ void *unmap;
+ void *allocate_buffer;
+ void *free_buffer;
+ void *flush;
+ efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
+ unsigned long *segment_nr,
+ unsigned long *bus_nr,
+ unsigned long *device_nr,
+ unsigned long *func_nr);
+ void *attributes;
+ void *get_bar_attributes;
+ void *set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+ };
+ struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ u64 romsize;
+ u32 romimage;
+ } mixed_mode;
+};
+
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
+#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
+#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
+#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
+#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
+#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
+#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+
+struct efi_dev_path;
+
+typedef union apple_properties_protocol apple_properties_protocol_t;
+
+union apple_properties_protocol {
+ struct {
+ unsigned long version;
+ efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32 *);
+ efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32);
+ efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *);
+ efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
+ void *buffer, u32 *);
+ };
+ struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+ } mixed_mode;
+};
+
+typedef u32 efi_tcg2_event_log_format;
+
+#define INITRD_EVENT_TAG_ID 0x8F3B22ECU
+#define LOAD_OPTIONS_EVENT_TAG_ID 0x8F3B22EDU
+#define EV_EVENT_TAG 0x00000006U
+#define EFI_TCG2_EVENT_HEADER_VERSION 0x1
+
+struct efi_tcg2_event {
+ u32 event_size;
+ struct {
+ u32 header_size;
+ u16 header_version;
+ u32 pcr_index;
+ u32 event_type;
+ } __packed event_header;
+ /* u8[] event follows here */
+} __packed;
+
+struct efi_tcg2_tagged_event {
+ u32 tagged_event_id;
+ u32 tagged_event_data_size;
+ /* u8 tagged event data follows here */
+} __packed;
+
+typedef struct efi_tcg2_event efi_tcg2_event_t;
+typedef struct efi_tcg2_tagged_event efi_tcg2_tagged_event_t;
+typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
+
+union efi_tcg2_protocol {
+ struct {
+ void *get_capability;
+ efi_status_t (__efiapi *get_event_log)(efi_tcg2_protocol_t *,
+ efi_tcg2_event_log_format,
+ efi_physical_addr_t *,
+ efi_physical_addr_t *,
+ efi_bool_t *);
+ efi_status_t (__efiapi *hash_log_extend_event)(efi_tcg2_protocol_t *,
+ u64,
+ efi_physical_addr_t,
+ u64,
+ const efi_tcg2_event_t *);
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+ };
+ struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+ } mixed_mode;
+};
+
+struct riscv_efi_boot_protocol {
+ u64 revision;
+
+ efi_status_t (__efiapi *get_boot_hartid)(struct riscv_efi_boot_protocol *,
+ unsigned long *boot_hartid);
+};
+
+typedef union efi_load_file_protocol efi_load_file_protocol_t;
+typedef union efi_load_file_protocol efi_load_file2_protocol_t;
+
+union efi_load_file_protocol {
+ struct {
+ efi_status_t (__efiapi *load_file)(efi_load_file_protocol_t *,
+ efi_device_path_protocol_t *,
+ bool, unsigned long *, void *);
+ };
+ struct {
+ u32 load_file;
+ } mixed_mode;
+};
+
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ u8 variable_data[];
+ // efi_char16_t description[];
+ // efi_device_path_protocol_t file_path_list[];
+ // u8 optional_data[];
+} __packed efi_load_option_t;
+
+#define EFI_LOAD_OPTION_ACTIVE 0x0001U
+#define EFI_LOAD_OPTION_FORCE_RECONNECT 0x0002U
+#define EFI_LOAD_OPTION_HIDDEN 0x0008U
+#define EFI_LOAD_OPTION_CATEGORY 0x1f00U
+#define EFI_LOAD_OPTION_CATEGORY_BOOT 0x0000U
+#define EFI_LOAD_OPTION_CATEGORY_APP 0x0100U
+
+#define EFI_LOAD_OPTION_BOOT_MASK \
+ (EFI_LOAD_OPTION_ACTIVE|EFI_LOAD_OPTION_HIDDEN|EFI_LOAD_OPTION_CATEGORY)
+#define EFI_LOAD_OPTION_MASK (EFI_LOAD_OPTION_FORCE_RECONNECT|EFI_LOAD_OPTION_BOOT_MASK)
+
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+ u32 optional_data_size;
+ const void *optional_data;
+} efi_load_option_unpacked_t;
+
+void efi_pci_disable_bridge_busmaster(void);
+
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
+ efi_exit_boot_map_processing priv_func);
+
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr);
+
+void *get_fdt(unsigned long *fdt_size);
+
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver);
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count);
+
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed,
+ int memory_type, unsigned long alloc_limit);
+
+efi_status_t efi_random_get_seed(void);
+
+efi_status_t check_platform_features(void);
+
+void *get_efi_config_table(efi_guid_t guid);
+
+/* NOTE: These functions do not print a trailing newline after the string */
+void efi_char16_puts(efi_char16_t *);
+void efi_puts(const char *str);
+
+__printf(1, 2) int efi_printk(char const *fmt, ...);
+
+void efi_free(unsigned long size, unsigned long addr);
+
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
+
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
+
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl);
+
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max);
+
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align,
+ int memory_type);
+
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr);
+
+efi_status_t efi_parse_options(char const *cmdline);
+
+void efi_parse_option_graphics(char *option);
+
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size);
+
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
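+/*
+ * sizeof(L"dtb=") - 2 is the size in bytes of the wide string "dtb="
+ * without its terminating L'\0', which is the prefix size expected by
+ * handle_cmdline_files().
+ */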
+static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out);
+/*
+ * This function handles the architecture-specific differences between arm and
+ * arm64 regarding where the kernel image must be loaded and any memory that
+ * must be reserved. On failure it is required to free all allocations it
+ * has made.
+ */
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle);
+
+/* shared entrypoint between the normal stub and the zboot stub */
+efi_status_t efi_stub_common(efi_handle_t handle,
+ efi_loaded_image_t *image,
+ unsigned long image_addr,
+ char *cmdline_ptr);
+
+efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr);
+
+asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size);
+
+void efi_handle_post_ebs_state(void);
+
+enum efi_secureboot_mode efi_get_secureboot(void);
+
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+void efi_enable_reset_attack_mitigation(void);
+#else
+static inline void
+efi_enable_reset_attack_mitigation(void) { }
+#endif
+
+void efi_retrieve_tpm2_eventlog(void);
+
+struct screen_info *alloc_screen_info(void);
+struct screen_info *__alloc_screen_info(void);
+void free_screen_info(struct screen_info *si);
+
+void efi_cache_sync_image(unsigned long image_base,
+ unsigned long alloc_size);
+
+struct efi_smbios_record {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+const struct efi_smbios_record *efi_get_smbios_record(u8 type);
+
+struct efi_smbios_type1_record {
+ struct efi_smbios_record header;
+
+ u8 manufacturer;
+ u8 product_name;
+ u8 version;
+ u8 serial_number;
+ efi_guid_t uuid;
+ u8 wakeup_type;
+ u8 sku_number;
+ u8 family;
+};
+
+struct efi_smbios_type4_record {
+ struct efi_smbios_record header;
+
+ u8 socket;
+ u8 processor_type;
+ u8 processor_family;
+ u8 processor_manufacturer;
+ u8 processor_id[8];
+ u8 processor_version;
+ u8 voltage;
+ u16 external_clock;
+ u16 max_speed;
+ u16 current_speed;
+ u8 status;
+ u8 processor_upgrade;
+ u16 l1_cache_handle;
+ u16 l2_cache_handle;
+ u16 l3_cache_handle;
+ u8 serial_number;
+ u8 asset_tag;
+ u8 part_number;
+ u8 core_count;
+ u8 enabled_core_count;
+ u8 thread_count;
+ u16 processor_characteristics;
+ u16 processor_family2;
+ u16 core_count2;
+ u16 enabled_core_count2;
+ u16 thread_count2;
+ u16 thread_enabled;
+};
+
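+/*
+ * Resolve a string member of an SMBIOS record: the byte at the member's
+ * offset holds the index of the string in the record's trailing string
+ * table, which __efi_get_smbios_string() looks up.
+ */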
+#define efi_get_smbios_string(__record, __type, __name) ({ \
+ int off = offsetof(struct efi_smbios_type ## __type ## _record, \
+ __name); \
+ __efi_get_smbios_string((__record), __type, off); \
+})
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset);
+
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size);
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed);
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle);
+
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
+
+efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
+ struct efi_boot_memmap *map);
+void process_unaccepted_memory(u64 start, u64 end);
+void accept_memory(phys_addr_t start, phys_addr_t end);
+void arch_accept_memory(phys_addr_t start, phys_addr_t end);
+
+#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
new file mode 100644
index 0000000000..70e9789ff9
--- /dev/null
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FDT-related helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2013 Linaro Limited; author Roy Franz
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define EFI_DT_ADDR_CELLS_DEFAULT 2
+#define EFI_DT_SIZE_CELLS_DEFAULT 2
+
+static void fdt_update_cell_size(void *fdt)
+{
+ int offset;
+
+ offset = fdt_path_offset(fdt, "/");
+ /* Set the #address-cells and #size-cells values for an empty tree */
+
+ fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+}
+
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
+ void *fdt, int new_fdt_size, char *cmdline_ptr)
+{
+ int node, num_rsv;
+ int status;
+ u32 fdt_val32;
+ u64 fdt_val64;
+
+ /* Do some checks on provided FDT, if it exists: */
+ if (orig_fdt) {
+ if (fdt_check_header(orig_fdt)) {
+ efi_err("Device Tree header not valid!\n");
+ return EFI_LOAD_ERROR;
+ }
+ /*
+ * We don't get the size of the FDT if we get it from a
+ * configuration table:
+ */
+ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
+ efi_err("Truncated device tree! foo!\n");
+ return EFI_LOAD_ERROR;
+ }
+ }
+
+ if (orig_fdt) {
+ status = fdt_open_into(orig_fdt, fdt, new_fdt_size);
+ } else {
+ status = fdt_create_empty_tree(fdt, new_fdt_size);
+ if (status == 0) {
+ /*
+ * Any failure from the following function is
+ * non-critical:
+ */
+ fdt_update_cell_size(fdt);
+ }
+ }
+
+ if (status != 0)
+ goto fdt_set_fail;
+
+ /*
+ * Delete all memory reserve map entries. When booting via UEFI,
+ * kernel will use the UEFI memory map to find reserved regions.
+ */
+ num_rsv = fdt_num_mem_rsv(fdt);
+ while (num_rsv-- > 0)
+ fdt_del_mem_rsv(fdt, num_rsv);
+
+ node = fdt_subnode_offset(fdt, 0, "chosen");
+ if (node < 0) {
+ node = fdt_add_subnode(fdt, 0, "chosen");
+ if (node < 0) {
+ /* 'node' is an error code when negative: */
+ status = node;
+ goto fdt_set_fail;
+ }
+ }
+
+ if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) {
+ status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
+ strlen(cmdline_ptr) + 1);
+ if (status)
+ goto fdt_set_fail;
+ }
+
+ /* Add FDT entries for EFI runtime services in chosen node. */
+ node = fdt_subnode_offset(fdt, 0, "chosen");
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+
+ fdt_val64 = U64_MAX; /* placeholder */
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+
+ fdt_val32 = U32_MAX; /* placeholder */
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ efi_status_t efi_status;
+
+ efi_status = efi_get_random_bytes(sizeof(fdt_val64),
+ (u8 *)&fdt_val64);
+ if (efi_status == EFI_SUCCESS) {
+ status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+ }
+ }
+
+ /* Shrink the FDT back to its minimum size: */
+ fdt_pack(fdt);
+
+ return EFI_SUCCESS;
+
+fdt_set_fail:
+ if (status == -FDT_ERR_NOSPACE)
+ return EFI_BUFFER_TOO_SMALL;
+
+ return EFI_LOAD_ERROR;
+}
+
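+/*
+ * Fill in the memory map properties of the /chosen node with the final map
+ * location, size and descriptor format. Only in-place property updates are
+ * used here, since this runs from the exit_boot_services() callback where
+ * no further memory allocations are permitted.
+ */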
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+ u64 fdt_val64;
+ u32 fdt_val32;
+ int err;
+
+ if (node < 0)
+ return EFI_LOAD_ERROR;
+
+ fdt_val64 = cpu_to_fdt64((unsigned long)map->map);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(map->map_size);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(map->desc_size);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(map->desc_ver);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ return EFI_SUCCESS;
+}
+
+struct exit_boot_struct {
+ struct efi_boot_memmap *boot_memmap;
+ efi_memory_desc_t *runtime_map;
+ int runtime_entry_count;
+ void *new_fdt_addr;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+{
+ struct exit_boot_struct *p = priv;
+
+ p->boot_memmap = map;
+
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
+
+ return update_fdt_memmap(p->new_fdt_addr, map);
+}
+
+#ifndef MAX_FDT_SIZE
+# define MAX_FDT_SIZE SZ_2M
+#endif
+
+/*
+ * Allocate memory for a new FDT, then add EFI and commandline related fields
+ * to the FDT. This routine increases the FDT allocation size until the
+ * allocated memory is large enough. EFI allocations are in EFI_PAGE_SIZE
+ * granules, which are fixed at 4K bytes, so in most cases the first allocation
+ * should succeed. EFI boot services are exited at the end of this function.
+ * There must be no allocations between the get_memory_map() call and the
+ * exit_boot_services() call, so the exiting of boot services is very tightly
+ * tied to the creation of the FDT with the final memory map in it.
+ */
+static
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ efi_loaded_image_t *image,
+ unsigned long *new_fdt_addr,
+ char *cmdline_ptr)
+{
+ unsigned long desc_size;
+ u32 desc_ver;
+ efi_status_t status;
+ struct exit_boot_struct priv;
+ unsigned long fdt_addr = 0;
+ unsigned long fdt_size = 0;
+
+ if (!efi_novamap) {
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size,
+ &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+ }
+
+ /*
+ * Unauthenticated device tree data is a security hazard, so ignore
+ * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
+ * boot is enabled if we can't determine its state.
+ */
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ efi_get_secureboot() != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ efi_err("Ignoring DTB from command line.\n");
+ } else {
+ status = efi_load_dtb(image, &fdt_addr, &fdt_size);
+
+ if (status != EFI_SUCCESS && status != EFI_NOT_READY) {
+ efi_err("Failed to load device tree!\n");
+ goto fail;
+ }
+ }
+
+ if (fdt_addr) {
+ efi_info("Using DTB from command line\n");
+ } else {
+ /* Look for a device tree configuration table entry. */
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
+ if (fdt_addr)
+ efi_info("Using DTB from configuration table\n");
+ }
+
+ if (!fdt_addr)
+ efi_info("Generating empty DTB\n");
+
+ efi_info("Exiting boot services...\n");
+
+ status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for new device tree.\n");
+ goto fail;
+ }
+
+ status = update_fdt((void *)fdt_addr, fdt_size,
+ (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to construct new device tree.\n");
+ goto fail_free_new_fdt;
+ }
+
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
+
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+
+ if (efi_novamap)
+ return EFI_SUCCESS;
+
+ /* Install the new virtual address map */
+ svam = efi_system_table->runtime->set_virtual_address_map;
+ status = svam(priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
+
+ /*
+ * We are beyond the point of no return here, so if the call to
+ * SetVirtualAddressMap() failed, we need to signal that to the
+ * incoming kernel but proceed normally otherwise.
+ */
+ if (status != EFI_SUCCESS) {
+ efi_memory_desc_t *p;
+ int l;
+
+ /*
+ * Set the virtual address field of all
+ * EFI_MEMORY_RUNTIME entries to U64_MAX. This will
+ * signal the incoming kernel that no virtual
+ * translation has been installed.
+ */
+ for (l = 0; l < priv.boot_memmap->map_size;
+ l += priv.boot_memmap->desc_size) {
+ p = (void *)priv.boot_memmap->map + l;
+
+ if (p->attribute & EFI_MEMORY_RUNTIME)
+ p->virt_addr = U64_MAX;
+ }
+ }
+ return EFI_SUCCESS;
+ }
+
+ efi_err("Exit boot services failed.\n");
+
+fail_free_new_fdt:
+ efi_free(MAX_FDT_SIZE, *new_fdt_addr);
+
+fail:
+ efi_free(fdt_size, fdt_addr);
+
+ efi_bs_call(free_pool, priv.runtime_map);
+
+ return EFI_LOAD_ERROR;
+}
+
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ unsigned long fdt_addr;
+ efi_status_t status;
+
+ status = allocate_new_fdt_and_exit_boot(handle, image, &fdt_addr,
+ cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to update FDT and exit boot services\n");
+ return status;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
+ efi_enter_kernel(kernel_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
+ /* not reached */
+}
+
+void *get_fdt(unsigned long *fdt_size)
+{
+ void *fdt;
+
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
+
+ if (!fdt)
+ return NULL;
+
+ if (fdt_check_header(fdt) != 0) {
+ efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
+ }
+ *fdt_size = fdt_totalsize(fdt);
+ return fdt;
+}
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
new file mode 100644
index 0000000000..d6a025df07
--- /dev/null
+++ b/drivers/firmware/efi/libstub/file.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define MAX_FILENAME_SIZE 256
+
+/*
+ * Some firmware implementations have problems reading files in one go.
+ * A read chunk size of 1MB seems to work for most platforms.
+ *
+ * Unfortunately, reading files in chunks triggers *other* bugs on some
+ * platforms, so we provide a way to disable this workaround, which can
+ * be done by passing "efi=nochunk" on the EFI boot stub command line.
+ *
+ * If you experience issues with initrd images being corrupt it's worth
+ * trying efi=nochunk, but chunking is enabled by default on x86 because
+ * there are far more machines that require the workaround than those that
+ * break with it enabled.
+ */
+#define EFI_READ_CHUNK_SIZE SZ_1M
+
+struct finfo {
+ efi_file_info_t info;
+ efi_char16_t filename[MAX_FILENAME_SIZE];
+};
+
+static efi_status_t efi_open_file(efi_file_protocol_t *volume,
+ struct finfo *fi,
+ efi_file_protocol_t **handle,
+ unsigned long *file_size)
+{
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_file_protocol_t *fh;
+ unsigned long info_sz;
+ efi_status_t status;
+ efi_char16_t *c;
+
+ /* Replace UNIX dir separators with EFI standard ones */
+ for (c = fi->filename; *c != L'\0'; c++) {
+ if (*c == L'/')
+ *c = L'\\';
+ }
+
+ status = efi_call_proto(volume, open, &fh, fi->filename,
+ EFI_FILE_MODE_READ, 0);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to open file: %ls\n", fi->filename);
+ return status;
+ }
+
+ info_sz = sizeof(struct finfo);
+ status = efi_call_proto(fh, get_info, &info_guid, &info_sz, fi);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get file info\n");
+ efi_call_proto(fh, close);
+ return status;
+ }
+
+ *handle = fh;
+ *file_size = fi->info.file_size;
+ return EFI_SUCCESS;
+}
+
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
+ efi_file_protocol_t **fh)
+{
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_simple_file_system_protocol_t *io;
+ efi_status_t status;
+
+ status = efi_bs_call(handle_protocol, efi_table_attr(image, device_handle),
+ &fs_proto, (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = efi_call_proto(io, open_volume, fh);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to open volume\n");
+
+ return status;
+}
+
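+/*
+ * Scan the wide-character command line for "<prefix><filename>", copy the
+ * filename (NUL terminated, at most @result_len chars including the
+ * terminator) into @result and return the offset just past the parsed
+ * characters, or 0 if the prefix was not found.
+ */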
+static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ const efi_char16_t *prefix, int prefix_size,
+ efi_char16_t *result, int result_len)
+{
+ int prefix_len = prefix_size / 2;
+ bool found = false;
+ int i;
+
+ for (i = prefix_len; i < cmdline_len; i++) {
+ if (!memcmp(&cmdline[i - prefix_len], prefix, prefix_size)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ /* Skip any leading slashes */
+ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ i++;
+
+ while (--result_len > 0 && i < cmdline_len) {
+ efi_char16_t c = cmdline[i++];
+
+ if (c == L'\0' || c == L'\n' || c == L' ')
+ break;
+ *result++ = c;
+ }
+ *result = L'\0';
+ return i;
+}
+
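+/*
+ * Interpret fi->filename as a textual device path, locate a volume along it
+ * that implements the simple file system protocol, open that volume and
+ * rewrite fi->filename to the remaining file path relative to it.
+ */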
+static efi_status_t efi_open_device_path(efi_file_protocol_t **volume,
+ struct finfo *fi)
+{
+ efi_guid_t text_to_dp_guid = EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID;
+ static efi_device_path_from_text_protocol_t *text_to_dp = NULL;
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_device_path_protocol_t *initrd_dp;
+ efi_simple_file_system_protocol_t *io;
+ struct efi_file_path_dev_path *fpath;
+ efi_handle_t handle;
+ efi_status_t status;
+
+ /* See if the text to device path protocol exists */
+ if (!text_to_dp &&
+ efi_bs_call(locate_protocol, &text_to_dp_guid, NULL,
+ (void **)&text_to_dp) != EFI_SUCCESS)
+ return EFI_UNSUPPORTED;
+
+ /* Convert the filename wide string into a device path */
+ initrd_dp = efi_fn_call(text_to_dp, convert_text_to_device_path,
+ fi->filename);
+
+ /* Check whether the device path in question implements simple FS */
+ if ((efi_bs_call(locate_device_path, &fs_proto, &initrd_dp, &handle) ?:
+ efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io))
+ != EFI_SUCCESS)
+ return EFI_NOT_FOUND;
+
+ /* Check whether the remaining device path is a file device path */
+ if (initrd_dp->type != EFI_DEV_MEDIA ||
+ initrd_dp->sub_type != EFI_DEV_MEDIA_FILE) {
+ efi_warn("Unexpected device path node type: (%x, %x)\n",
+ initrd_dp->type, initrd_dp->sub_type);
+ return EFI_LOAD_ERROR;
+ }
+
+ /* Copy the remaining file path into the fi structure */
+ fpath = (struct efi_file_path_dev_path *)initrd_dp;
+ memcpy(fi->filename, fpath->filename,
+ min(sizeof(fi->filename),
+ fpath->header.length - sizeof(fpath->header)));
+
+ status = efi_call_proto(io, open_volume, volume);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to open volume\n");
+
+ return status;
+}
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ const efi_char16_t *cmdline = efi_table_attr(image, load_options);
+ u32 cmdline_len = efi_table_attr(image, load_options_size);
+ unsigned long efi_chunk_size = ULONG_MAX;
+ efi_file_protocol_t *volume = NULL;
+ efi_file_protocol_t *file;
+ unsigned long alloc_addr;
+ unsigned long alloc_size;
+ efi_status_t status;
+ int offset;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ efi_apply_loadoptions_quirk((const void **)&cmdline, &cmdline_len);
+ cmdline_len /= sizeof(*cmdline);
+
+ if (IS_ENABLED(CONFIG_X86) && !efi_nochunk)
+ efi_chunk_size = EFI_READ_CHUNK_SIZE;
+
+ alloc_addr = alloc_size = 0;
+ do {
+ struct finfo fi;
+ unsigned long size;
+ void *addr;
+
+ offset = find_file_option(cmdline, cmdline_len,
+ optstr, optstr_size,
+ fi.filename, ARRAY_SIZE(fi.filename));
+
+ if (!offset)
+ break;
+
+ cmdline += offset;
+ cmdline_len -= offset;
+
+ status = efi_open_device_path(&volume, &fi);
+ if (status == EFI_UNSUPPORTED || status == EFI_NOT_FOUND)
+ /* try the volume that holds the kernel itself */
+ status = efi_open_volume(image, &volume);
+
+ if (status != EFI_SUCCESS)
+ goto err_free_alloc;
+
+ status = efi_open_file(volume, &fi, &file, &size);
+ if (status != EFI_SUCCESS)
+ goto err_close_volume;
+
+ /*
+ * Check whether the existing allocation can contain the next
+ * file. This condition will also trigger naturally during the
+ * first (and typically only) iteration of the loop, given that
+ * alloc_size == 0 in that case.
+ */
+ if (round_up(alloc_size + size, EFI_ALLOC_ALIGN) >
+ round_up(alloc_size, EFI_ALLOC_ALIGN)) {
+ unsigned long old_addr = alloc_addr;
+
+ status = EFI_OUT_OF_RESOURCES;
+ if (soft_limit < hard_limit)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ soft_limit);
+ if (status == EFI_OUT_OF_RESOURCES)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ hard_limit);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for files\n");
+ goto err_close_file;
+ }
+
+ if (old_addr != 0) {
+ /*
+ * This is not the first time we've gone
+ * around this loop, and so we are loading
+ * multiple files that need to be concatenated
+ * and returned in a single buffer.
+ */
+ memcpy((void *)alloc_addr, (void *)old_addr, alloc_size);
+ efi_free(alloc_size, old_addr);
+ }
+ }
+
+ addr = (void *)alloc_addr + alloc_size;
+ alloc_size += size;
+
+ while (size) {
+ unsigned long chunksize = min(size, efi_chunk_size);
+
+ status = efi_call_proto(file, read, &chunksize, addr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read file\n");
+ goto err_close_file;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+ efi_call_proto(file, close);
+ efi_call_proto(volume, close);
+ } while (offset > 0);
+
+ *load_addr = alloc_addr;
+ *load_size = alloc_size;
+
+ if (*load_size == 0)
+ return EFI_NOT_READY;
+ return EFI_SUCCESS;
+
+err_close_file:
+ efi_call_proto(file, close);
+
+err_close_volume:
+ efi_call_proto(volume, close);
+
+err_free_alloc:
+ efi_free(alloc_size, alloc_addr);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/find.c b/drivers/firmware/efi/libstub/find.c
new file mode 100644
index 0000000000..4e7740d289
--- /dev/null
+++ b/drivers/firmware/efi/libstub/find.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitmap.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
+
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
+{
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
+}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 0000000000..ea5da307d5
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+#include "efistub.h"
+
+enum efi_cmdline_option {
+ EFI_CMDLINE_NONE,
+ EFI_CMDLINE_MODE_NUM,
+ EFI_CMDLINE_RES,
+ EFI_CMDLINE_AUTO,
+ EFI_CMDLINE_LIST
+};
+
+static struct {
+ enum efi_cmdline_option option;
+ union {
+ u32 mode;
+ struct {
+ u32 width, height;
+ int format;
+ u8 depth;
+ } res;
+ };
+} cmdline = { .option = EFI_CMDLINE_NONE };
+
+static bool parse_modenum(char *option, char **next)
+{
+ u32 m;
+
+ if (!strstarts(option, "mode="))
+ return false;
+ option += strlen("mode=");
+ m = simple_strtoull(option, &option, 0);
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_MODE_NUM;
+ cmdline.mode = m;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_res(char *option, char **next)
+{
+ u32 w, h, d = 0;
+ int pf = -1;
+
+ if (!isdigit(*option))
+ return false;
+ w = simple_strtoull(option, &option, 10);
+ if (*option++ != 'x' || !isdigit(*option))
+ return false;
+ h = simple_strtoull(option, &option, 10);
+ if (*option == '-') {
+ option++;
+ if (strstarts(option, "rgb")) {
+ option += strlen("rgb");
+ pf = PIXEL_RGB_RESERVED_8BIT_PER_COLOR;
+ } else if (strstarts(option, "bgr")) {
+ option += strlen("bgr");
+ pf = PIXEL_BGR_RESERVED_8BIT_PER_COLOR;
+ } else if (isdigit(*option))
+ d = simple_strtoull(option, &option, 10);
+ else
+ return false;
+ }
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_RES;
+ cmdline.res.width = w;
+ cmdline.res.height = h;
+ cmdline.res.format = pf;
+ cmdline.res.depth = d;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_auto(char *option, char **next)
+{
+ if (!strstarts(option, "auto"))
+ return false;
+ option += strlen("auto");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_AUTO;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_list(char *option, char **next)
+{
+ if (!strstarts(option, "list"))
+ return false;
+ option += strlen("list");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_LIST;
+
+ *next = option;
+ return true;
+}
+
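+/*
+ * Parse the graphics options from the command line: mode=<n>, <w>x<h> with
+ * an optional -rgb, -bgr or -<bpp> suffix, "auto" or "list". Anything else
+ * is skipped up to the next comma.
+ */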
+void efi_parse_option_graphics(char *option)
+{
+ while (*option) {
+ if (parse_modenum(option, &option))
+ continue;
+ if (parse_res(option, &option))
+ continue;
+ if (parse_auto(option, &option))
+ continue;
+ if (parse_list(option, &option))
+ continue;
+
+ while (*option && *option++ != ',')
+ ;
+ }
+}
+
+static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ if (cmdline.mode == cur_mode)
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+ if (cmdline.mode >= max_mode) {
+ efi_err("Requested mode is invalid\n");
+ return cur_mode;
+ }
+
+ status = efi_call_proto(gop, query_mode, cmdline.mode,
+ &info_size, &info);
+ if (status != EFI_SUCCESS) {
+ efi_err("Couldn't get mode information\n");
+ return cur_mode;
+ }
+
+ pf = info->pixel_format;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
+ efi_err("Invalid PixelFormat\n");
+ return cur_mode;
+ }
+
+ return cmdline.mode;
+}
+
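+/*
+ * Return the bits per pixel of a mode: derived from the combined colour
+ * masks for PIXEL_BIT_MASK formats, 32 bpp for all other formats.
+ */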
+static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ u32 mask = pixel_info.red_mask | pixel_info.green_mask |
+ pixel_info.blue_mask | pixel_info.reserved_mask;
+ if (!mask)
+ return 0;
+ return __fls(mask) - __ffs(mask) + 1;
+ } else
+ return 32;
+}
+
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ info = efi_table_attr(mode, info);
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return m;
+ }
+
+ efi_err("Couldn't find requested mode\n");
+
+ return cur_mode;
+}
+
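+/*
+ * Starting from the current mode, pick the usable mode with the largest
+ * resolution, preferring greater colour depth when the area is the same.
+ */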
+static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode, best_mode, area;
+ u8 depth;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h, a;
+ u8 d;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ info = efi_table_attr(mode, info);
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ best_mode = cur_mode;
+ area = w * h;
+ depth = pixel_bpp(pf, pi);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ a = w * h;
+ if (a < area)
+ continue;
+ d = pixel_bpp(pf, pi);
+ if (a > area || d > depth) {
+ best_mode = m;
+ area = a;
+ depth = d;
+ }
+ }
+
+ return best_mode;
+}
+
+static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+ u8 d;
+ const char *dstr;
+ bool valid;
+ efi_input_key_t key;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
+ efi_puts(" * = current mode\n"
+ " - = unusable mode\n");
+ for (m = 0; m < max_mode; m++) {
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
+ d = 0;
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ d = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
+ }
+
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ m,
+ m == cur_mode ? '*' : ' ',
+ !valid ? '-' : ' ',
+ w, h, dstr, d);
+ }
+
+ efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
+ status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
+ if (status != EFI_SUCCESS && status != EFI_TIMEOUT) {
+ efi_err("Unable to read key, continuing in 10 seconds\n");
+ efi_bs_call(stall, 10 * EFI_USEC_PER_SEC);
+ }
+
+ return cur_mode;
+}
+
+static void set_mode(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode;
+ u32 cur_mode, new_mode;
+
+ switch (cmdline.option) {
+ case EFI_CMDLINE_MODE_NUM:
+ new_mode = choose_mode_modenum(gop);
+ break;
+ case EFI_CMDLINE_RES:
+ new_mode = choose_mode_res(gop);
+ break;
+ case EFI_CMDLINE_AUTO:
+ new_mode = choose_mode_auto(gop);
+ break;
+ case EFI_CMDLINE_LIST:
+ new_mode = choose_mode_list(gop);
+ break;
+ default:
+ return;
+ }
+
+ mode = efi_table_attr(gop, mode);
+ cur_mode = efi_table_attr(mode, mode);
+
+ if (new_mode == cur_mode)
+ return;
+
+ if (efi_call_proto(gop, set_mode, new_mode) != EFI_SUCCESS)
+ efi_err("Failed to set requested mode\n");
+}
+
+static void find_bits(u32 mask, u8 *pos, u8 *size)
+{
+ if (!mask) {
+ *pos = *size = 0;
+ return;
+ }
+
+ /* UEFI spec guarantees that the set bits are contiguous */
+ *pos = __ffs(mask);
+ *size = __fls(mask) - *pos + 1;
+}
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+ efi_pixel_bitmask_t pixel_info, int pixel_format)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask,
+ &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask,
+ &si->green_pos, &si->green_size);
+ find_bits(pixel_info.blue_mask,
+ &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask,
+ &si->rsvd_pos, &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size +
+ si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ } else {
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->red_pos = 0;
+ si->blue_pos = 16;
+ } else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
+ si->blue_pos = 0;
+ si->red_pos = 16;
+ }
+
+ si->green_pos = 8;
+ si->rsvd_pos = 24;
+ si->red_size = si->green_size =
+ si->blue_size = si->rsvd_size = 8;
+
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ }
+}
+
+static efi_graphics_output_protocol_t *
+find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *first_gop;
+ efi_handle_t h;
+ int i;
+
+ first_gop = NULL;
+
+ for_each_efi_handle(h, handles, size, i) {
+ efi_status_t status;
+
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ void *dummy = NULL;
+
+ status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ if (info->pixel_format == PIXEL_BLT_ONLY ||
+ info->pixel_format >= PIXEL_FORMAT_MAX)
+ continue;
+
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ *
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ return gop;
+
+ if (!first_gop)
+ first_gop = gop;
+ }
+
+ return first_gop;
+}
+
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ gop = find_gop(proto, size, handles);
+
+ /* Did we find any GOPs? */
+ if (!gop)
+ return EFI_NOT_FOUND;
+
+ /* Change mode if requested */
+ set_mode(gop);
+
+ /* EFI framebuffer */
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
+
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+
+ si->pages = 1;
+
+ setup_pixel_info(si, info->pixels_per_scan_line,
+ info->pixel_information, info->pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+
+ return EFI_SUCCESS;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size)
+{
+ efi_status_t status;
+ void **gop_handle = NULL;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&gop_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+ &size, gop_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ status = setup_gop(si, proto, size, gop_handle);
+
+free_handle:
+ efi_bs_call(free_pool, gop_handle);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
new file mode 100644
index 0000000000..965e734f6f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/string.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_KASAN
+#undef memcpy
+#undef memmove
+#undef memset
+void *__memcpy(void *__dest, const void *__src, size_t __n) __alias(memcpy);
+void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
+void *__memset(void *s, int c, size_t count) __alias(memset);
+#endif
+
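+/*
+ * memcpy(), memmove() and memset() are forwarded to the firmware's
+ * CopyMem() and SetMem() boot services, which remain usable until
+ * ExitBootServices() is called.
+ */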
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ efi_bs_call(copy_mem, dst, src, len);
+ return dst;
+}
+
+extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
+
+void *memset(void *dst, int c, size_t len)
+{
+ efi_bs_call(set_mem, dst, len, c & U8_MAX);
+ return dst;
+}
+
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
+#undef memcmp
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
new file mode 100644
index 0000000000..62d63f7a26
--- /dev/null
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures to deal with physical address space randomization.
+ */
+#include <linux/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_kaslr_get_phys_seed() - Get random seed for physical kernel KASLR
+ * @image_handle: Handle to the image
+ *
+ * If KASLR is not disabled, obtain a random seed using EFI_RNG_PROTOCOL
+ * that will be used to move the kernel physical mapping.
+ *
+ * Return: the random seed
+ */
+u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
+{
+ efi_status_t status;
+ u32 phys_seed;
+ efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
+ void *p;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return 0;
+
+ if (efi_nokaslr) {
+ efi_info("KASLR disabled on kernel command line\n");
+ } else if (efi_bs_call(handle_protocol, image_handle,
+ &li_fixed_proto, &p) == EFI_SUCCESS) {
+ efi_info("Image placement fixed by loader\n");
+ } else {
+ status = efi_get_random_bytes(sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_SUCCESS) {
+ return phys_seed;
+ } else if (status == EFI_NOT_FOUND) {
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
+ efi_nokaslr = true;
+ } else if (status != EFI_SUCCESS) {
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
+ status);
+ efi_nokaslr = true;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
+ */
+static bool check_image_region(u64 base, u64 size)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ bool ret = false;
+ int map_offset;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return false;
+
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+ /*
+ * Find the region that covers base, and return whether
+ * it covers base+size bytes.
+ */
+ if (base >= md->phys_addr && base < end) {
+ ret = (base + size) <= end;
+ break;
+ }
+ }
+
+ efi_bs_call(free_pool, map);
+
+ return ret;
+}
+
+/**
+ * efi_kaslr_relocate_kernel() - Relocate the kernel (random if KASLR enabled)
+ * @image_addr: Pointer to the current kernel location
+ * @reserve_addr: Pointer to the relocated kernel location
+ * @reserve_size: Size of the relocated kernel
+ * @kernel_size: Size of the text + data
+ * @kernel_codesize: Size of the text
+ * @kernel_memsize: Size of the text + data + bss
+ * @phys_seed: Random seed used for the relocation
+ *
+ * If KASLR is not enabled, this function relocates the kernel to a fixed
+ * address (or leaves it at its current location). If KASLR is enabled, the
+ * kernel's physical location is randomized using the seed passed in @phys_seed.
+ *
+ * Return: status code, EFI_SUCCESS if relocation is successful
+ */
+efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long kernel_size,
+ unsigned long kernel_codesize,
+ unsigned long kernel_memsize,
+ u32 phys_seed)
+{
+ efi_status_t status;
+ u64 min_kimg_align = efi_get_kimg_min_align();
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed,
+ EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
+ } else {
+ status = EFI_OUT_OF_RESOURCES;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (!check_image_region(*image_addr, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+ } else if (IS_ALIGNED(*image_addr, min_kimg_align) &&
+ (unsigned long)_end < EFI_ALLOC_LIMIT) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the placement is suitable.
+ */
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align,
+ EFI_LOADER_CODE);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
+ }
+ }
+
+ memcpy((void *)*reserve_addr, (void *)*image_addr, kernel_size);
+ *image_addr = *reserve_addr;
+ efi_icache_sync(*image_addr, *image_addr + kernel_codesize);
+ efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
+
+ return status;
+}
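
For illustration, a minimal sketch of how an architecture stub might wire these two helpers together, written in the context of efistub.h; the section symbols (_start, _edata, __init_text_end, _end) are assumed to be provided by the architecture, and the function name example_place_kernel is hypothetical. The riscv-stub.c hunk later in this patch follows the same pattern.

/*
 * Hypothetical caller; section symbols are assumed to come from the
 * architecture's linker script.
 */
static efi_status_t example_place_kernel(efi_handle_t image_handle,
                                         unsigned long *image_addr,
                                         unsigned long *reserve_addr,
                                         unsigned long *reserve_size)
{
        unsigned long ksize  = _edata - _start;           /* text + data */
        unsigned long kcode  = __init_text_end - _start;  /* text only   */
        unsigned long kmemsz = _end - _start;             /* incl. bss   */

        *image_addr   = (unsigned long)_start;
        *reserve_size = kmemsz;

        /*
         * A zero seed makes efi_kaslr_relocate_kernel() fall back to the
         * fixed placement path described in the comment above.
         */
        return efi_kaslr_relocate_kernel(image_addr, reserve_addr,
                                         reserve_size, ksize, kcode, kmemsz,
                                         efi_kaslr_get_phys_seed(image_handle));
}
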
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
new file mode 100644
index 0000000000..d6ec5d4b8d
--- /dev/null
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Yun Liu <liuyun@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/efi.h>
+#include <asm/addrspace.h>
+#include "efistub.h"
+
+extern int kernel_asize;
+extern int kernel_fsize;
+extern int kernel_offset;
+extern int kernel_entry;
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ efi_status_t status;
+ unsigned long kernel_addr = 0;
+
+ kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
+
+ status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
+ EFI_KIMG_PREFERRED_ADDRESS, efi_get_kimg_min_align(), 0x0);
+
+ *image_addr = kernel_addr;
+ *image_size = kernel_asize;
+
+ return status;
+}
+
+unsigned long kernel_entry_address(unsigned long kernel_addr)
+{
+ unsigned long base = (unsigned long)&kernel_offset - kernel_offset;
+
+ return (unsigned long)&kernel_entry - base + kernel_addr;
+}
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
new file mode 100644
index 0000000000..0e0aa6cda7
--- /dev/null
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Yun Liu <liuyun@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/efi.h>
+#include <asm/addrspace.h>
+#include "efistub.h"
+
+typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline,
+ unsigned long systab);
+
+efi_status_t check_platform_features(void)
+{
+ return EFI_SUCCESS;
+}
+
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+{
+ struct exit_boot_struct *p = priv;
+
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
+unsigned long __weak kernel_entry_address(unsigned long kernel_addr)
+{
+ return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
+}
+
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ kernel_entry_t real_kernel_entry;
+ struct exit_boot_struct priv;
+ unsigned long desc_size;
+ efi_status_t status;
+ u32 desc_ver;
+
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size, &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+
+ efi_info("Exiting boot services\n");
+
+ efi_novamap = false;
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Install the new virtual address map */
+ efi_rt_call(set_virtual_address_map,
+ priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
+
+ /* Configure the direct mapping windows */
+ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
+ csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+
+ real_kernel_entry = (void *)kernel_entry_address(kernel_addr);
+
+ real_kernel_entry(true, (unsigned long)cmdline_ptr,
+ (unsigned long)efi_system_table);
+}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
new file mode 100644
index 0000000000..4f1fa30223
--- /dev/null
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_get_memory_map() - get memory map
+ * @map: pointer to memory map pointer to which to assign the
+ * newly allocated memory map
+ * @install_cfg_tbl: whether or not to install the boot memory map as a
+ * configuration table
+ *
+ * Retrieve the UEFI memory map. The allocated memory leaves room for
+ * up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl)
+{
+ int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
+ : EFI_LOADER_DATA;
+ efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
+ struct efi_boot_memmap *m, tmp;
+ efi_status_t status;
+ unsigned long size;
+
+ tmp.map_size = 0;
+ status = efi_bs_call(get_memory_map, &tmp.map_size, NULL, &tmp.map_key,
+ &tmp.desc_size, &tmp.desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ size = tmp.map_size + tmp.desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ status = efi_bs_call(allocate_pool, memtype, sizeof(*m) + size,
+ (void **)&m);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (install_cfg_tbl) {
+ /*
+ * Installing a configuration table might allocate memory, and
+ * this may modify the memory map. This means we should install
+ * the configuration table first, and re-install or delete it
+ * as needed.
+ */
+ status = efi_bs_call(install_configuration_table, &tbl_guid, m);
+ if (status != EFI_SUCCESS)
+ goto free_map;
+ }
+
+ m->buff_size = m->map_size = size;
+ status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
+ &m->desc_size, &m->desc_ver);
+ if (status != EFI_SUCCESS)
+ goto uninstall_table;
+
+ *map = m;
+ return EFI_SUCCESS;
+
+uninstall_table:
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+free_map:
+ efi_bs_call(free_pool, m);
+ return status;
+}
+
+/**
+ * efi_allocate_pages() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to EFI_ALLOC_ALIGN. The last allocated page will not exceed the address
+ * given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+
+ max = min(max, EFI_ALLOC_LIMIT);
+
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+ EFI_ALLOC_ALIGN,
+ EFI_LOADER_DATA);
+
+ alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, DIV_ROUND_UP(size, EFI_PAGE_SIZE),
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = alloc_addr;
+ return EFI_SUCCESS;
+}
+
+/**
+ * efi_free() - free memory pages
+ * @size: size of the memory area to free in bytes
+ * @addr: start of the memory area to free (must be EFI_PAGE_SIZE
+ * aligned)
+ *
+ * @size is rounded up to a multiple of EFI_ALLOC_ALIGN which is an
+ * architecture specific multiple of EFI_PAGE_SIZE. So this function should
+ * only be used to return pages allocated with efi_allocate_pages() or
+ * efi_low_alloc_above().
+ */
+void efi_free(unsigned long size, unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ efi_bs_call(free_pages, addr, nr_pages);
+}
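
As a usage sketch, the returned map is walked with a byte stride of map->desc_size rather than sizeof(efi_memory_desc_t), because firmware may report larger descriptors. The helper below is illustrative only and assumes the efistub.h environment used throughout this file.

/* Illustrative only: count conventional-memory pages in the current map. */
static u64 example_count_conventional_pages(void)
{
        struct efi_boot_memmap *map;
        u64 pages = 0;
        int off;

        if (efi_get_memory_map(&map, false) != EFI_SUCCESS)
                return 0;

        for (off = 0; off < map->map_size; off += map->desc_size) {
                efi_memory_desc_t *md = (void *)map->map + off;

                if (md->type == EFI_CONVENTIONAL_MEMORY)
                        pages += md->num_pages;
        }

        efi_bs_call(free_pool, map);
        return pages;
}
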
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644
index 0000000000..99fb25d2bc
--- /dev/null
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long pci_handle_size = 0;
+ efi_handle_t *pci_handle = NULL;
+ efi_handle_t handle;
+ efi_status_t status;
+ u16 class, command;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ efi_err("Failed to locate PCI I/O handles'\n");
+ return;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+ (void **)&pci_handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, pci_handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to locate PCI I/O handles'\n");
+ goto free_handle;
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+ unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ /*
+ * Disregard devices living on bus 0 - these are not behind a
+ * bridge so no point in disconnecting them from their drivers.
+ */
+ status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+ &device_nr, &func_nr);
+ if (status != EFI_SUCCESS || bus_nr == 0)
+ continue;
+
+ /*
+ * Don't disconnect VGA controllers so we don't risk losing
+ * access to the framebuffer. Drivers for true PCIe graphics
+ * controllers that are behind a PCIe root port do not use
+ * DMA to implement the GOP framebuffer anyway [although they
+ * may use it in their implementation of Gop->Blt()], and so
+ * disabling DMA in the PCI bridge should not interfere with
+ * normal operation of the device.
+ */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+ if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+ continue;
+
+ /* Disconnect this handle from all its drivers */
+ efi_bs_call(disconnect_controller, handle, NULL, NULL);
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+
+ if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ /* Disable busmastering */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+ continue;
+
+ command &= ~PCI_COMMAND_MASTER;
+ status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to disable PCI busmastering\n");
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
new file mode 100644
index 0000000000..3a67a2cea7
--- /dev/null
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/stdarg.h>
+
+#include <linux/ctype.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+#include "efistub.h"
+
+int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str: UCS-2 encoded string
+ */
+void efi_char16_puts(efi_char16_t *str)
+{
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
+}
+
+static
+u32 utf8_to_utf32(const u8 **s8)
+{
+ u32 c32;
+ u8 c0, cx;
+ size_t clen, i;
+
+ c0 = cx = *(*s8)++;
+ /*
+ * The position of the most-significant 0 bit gives us the length of
+ * a multi-octet encoding.
+ */
+ for (clen = 0; cx & 0x80; ++clen)
+ cx <<= 1;
+ /*
+ * If the 0 bit is in position 8, this is a valid single-octet
+ * encoding. If the 0 bit is in position 7 or positions 1-3, the
+ * encoding is invalid.
+ * In either case, we just return the first octet.
+ */
+ if (clen < 2 || clen > 4)
+ return c0;
+ /* Get the bits from the first octet. */
+ c32 = cx >> clen--;
+ for (i = 0; i < clen; ++i) {
+ /* Trailing octets must have 10 in most significant bits. */
+ cx = (*s8)[i] ^ 0x80;
+ if (cx & 0xc0)
+ return c0;
+ c32 = (c32 << 6) | cx;
+ }
+ /*
+ * Check for validity:
+ * - The character must be in the Unicode range.
+ * - It must not be a surrogate.
+ * - It must be encoded using the correct number of octets.
+ */
+ if (c32 > 0x10ffff ||
+ (c32 & 0xf800) == 0xd800 ||
+ clen != (c32 >= 0x80) + (c32 >= 0x800) + (c32 >= 0x10000))
+ return c0;
+ *s8 += clen;
+ return c32;
+}
+
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str: UTF-8 encoded string
+ */
+void efi_puts(const char *str)
+{
+ efi_char16_t buf[128];
+ size_t pos = 0, lim = ARRAY_SIZE(buf);
+ const u8 *s8 = (const u8 *)str;
+ u32 c32;
+
+ while (*s8) {
+ if (*s8 == '\n')
+ buf[pos++] = L'\r';
+ c32 = utf8_to_utf32(&s8);
+ if (c32 < 0x10000) {
+ /* Characters in plane 0 use a single word. */
+ buf[pos++] = c32;
+ } else {
+ /*
+ * Characters in other planes encode into a surrogate
+ * pair.
+ */
+ buf[pos++] = (0xd800 - (0x10000 >> 10)) + (c32 >> 10);
+ buf[pos++] = 0xdc00 + (c32 & 0x3ff);
+ }
+ if (*s8 == '\0' || pos >= lim - 2) {
+ buf[pos] = L'\0';
+ efi_char16_puts(buf);
+ pos = 0;
+ }
+ }
+}
+
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt: format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less than the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return: number of printed characters
+ */
+int efi_printk(const char *fmt, ...)
+{
+ char printf_buf[256];
+ va_list args;
+ int printed;
+ int loglevel = printk_get_level(fmt);
+
+ switch (loglevel) {
+ case '0' ... '9':
+ loglevel -= '0';
+ break;
+ default:
+ /*
+ * Use loglevel -1 for cases where we just want to print to
+ * the screen.
+ */
+ loglevel = -1;
+ break;
+ }
+
+ if (loglevel >= efi_loglevel)
+ return 0;
+
+ if (loglevel >= 0)
+ efi_puts("EFI stub: ");
+
+ fmt = printk_skip_level(fmt);
+
+ va_start(args, fmt);
+ printed = vsnprintf(printf_buf, sizeof(printf_buf), fmt, args);
+ va_end(args);
+
+ efi_puts(printf_buf);
+ if (printed >= sizeof(printf_buf)) {
+ efi_puts("[Message truncated]\n");
+ return -1;
+ }
+
+ return printed;
+}
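
A couple of worked conversions may help when reading the two routines above; the byte sequences and resulting code points below follow directly from the UTF-8 decoding and UTF-16 surrogate rules the code implements, and are illustrative only.

/*
 * Worked examples:
 *   "é"      0xC3 0xA9            -> U+00E9, one UCS-2 word (plane 0)
 *   U+1F600  0xF0 0x9F 0x98 0x80  -> surrogate pair 0xD83D 0xDE00
 * An invalid lead byte (e.g. a lone 0x80) is passed through unchanged,
 * since utf8_to_utf32() falls back to returning the first octet.
 */
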
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 0000000000..7109b8a2dc
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+ struct {
+ efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+ unsigned long *,
+ efi_guid_t *);
+ efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+ efi_guid_t *, unsigned long,
+ u8 *out);
+ };
+ struct {
+ u32 get_info;
+ u32 get_rng;
+ } mixed_mode;
+};
+
+/**
+ * efi_get_random_bytes() - fill a buffer with random bytes
+ * @size: size of the buffer
+ * @out: caller allocated buffer to receive the random bytes
+ *
+ * The call will fail if either the firmware does not implement the
+ * EFI_RNG_PROTOCOL or there are not enough random bytes available to fill
+ * the buffer.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_status_t status;
+ efi_rng_protocol_t *rng = NULL;
+
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return efi_call_proto(rng, get_rng, NULL, size, out);
+}
+
+/**
+ * efi_random_get_seed() - provide random seed as configuration table
+ *
+ * The EFI_RNG_PROTOCOL is used to read random bytes. These random bytes are
+ * saved as a configuration table which can be used as entropy by the kernel
+ * for the initialization of its pseudo random number generator.
+ *
+ * If the EFI_RNG_PROTOCOL is not available or there are not enough random bytes
+ * available, the configuration table will not be installed and an error code
+ * will be returned.
+ *
+ * Return: status code
+ */
+efi_status_t efi_random_get_seed(void)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+ struct linux_efi_random_seed *prev_seed, *seed = NULL;
+ int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE;
+ unsigned long nv_seed_size = 0, offset = 0;
+ efi_rng_protocol_t *rng = NULL;
+ efi_status_t status;
+
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
+ if (status != EFI_SUCCESS)
+ seed_size = 0;
+
+ // Call GetVariable() with a zero length buffer to obtain the size
+ get_efi_var(L"RandomSeed", &rng_table_guid, NULL, &nv_seed_size, NULL);
+ if (!seed_size && !nv_seed_size)
+ return status;
+
+ seed_size += nv_seed_size;
+
+ /*
+ * Check whether a seed was provided by a prior boot stage. In that
+ * case, instead of overwriting it, let's create a new buffer that can
+ * hold both, and concatenate the existing and the new seeds.
+ * Note that we should read the seed size with caution, in case the
+ * table got corrupted in memory somehow.
+ */
+ prev_seed = get_efi_config_table(rng_table_guid);
+ if (prev_seed && prev_seed->size <= 512U) {
+ prev_seed_size = prev_seed->size;
+ seed_size += prev_seed_size;
+ }
+
+ /*
+ * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
+ * allocation will survive a kexec reboot (although we refresh the seed
+ * beforehand)
+ */
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ struct_size(seed, bits, seed_size),
+ (void **)&seed);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to allocate memory for RNG seed.\n");
+ goto err_warn;
+ }
+
+ if (rng) {
+ status = efi_call_proto(rng, get_rng, &rng_algo_raw,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
+ if (status == EFI_UNSUPPORTED)
+ /*
+ * Use whatever algorithm we have available if the raw algorithm
+ * is not implemented.
+ */
+ status = efi_call_proto(rng, get_rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
+ if (status == EFI_SUCCESS)
+ offset = EFI_RANDOM_SEED_SIZE;
+ }
+
+ if (nv_seed_size) {
+ status = get_efi_var(L"RandomSeed", &rng_table_guid, NULL,
+ &nv_seed_size, seed->bits + offset);
+
+ if (status == EFI_SUCCESS)
+ /*
+ * We delete the seed here, and /hope/ that this causes
+ * EFI to also zero out its representation on disk.
+ * This is somewhat idealistic, but overwriting the
+ * variable with zeros is likely just as fraught too.
+ * TODO: in the future, maybe we can hash it forward
+ * instead, and write a new seed.
+ */
+ status = set_efi_var(L"RandomSeed", &rng_table_guid, 0,
+ 0, NULL);
+
+ if (status == EFI_SUCCESS)
+ offset += nv_seed_size;
+ else
+ memzero_explicit(seed->bits + offset, nv_seed_size);
+ }
+
+ if (!offset)
+ goto err_freepool;
+
+ if (prev_seed_size) {
+ memcpy(seed->bits + offset, prev_seed->bits, prev_seed_size);
+ offset += prev_seed_size;
+ }
+
+ seed->size = offset;
+ status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ if (prev_seed_size) {
+ /* wipe and free the old seed if we managed to install the new one */
+ memzero_explicit(prev_seed->bits, prev_seed_size);
+ efi_bs_call(free_pool, prev_seed);
+ }
+ return EFI_SUCCESS;
+
+err_freepool:
+ memzero_explicit(seed, struct_size(seed, bits, seed_size));
+ efi_bs_call(free_pool, seed);
+ efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL or EFI variable\n");
+err_warn:
+ if (prev_seed)
+ efi_warn("Retaining bootloader-supplied seed only");
+ return status;
+}
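
To summarize what ends up behind LINUX_EFI_RANDOM_SEED_TABLE_GUID, a conceptual sketch of the installed table follows; struct linux_efi_random_seed is the type used by the code above, and the component ordering reflects the offsets accumulated during boot.

/*
 * Installed configuration table, conceptually:
 *
 *   struct linux_efi_random_seed {
 *           u32 size;    // bytes of entropy actually gathered
 *           u8  bits[];  // [ RNG output | "RandomSeed" variable | prior seed ]
 *   };
 *
 * A component that could not be obtained contributes zero bytes, and the
 * table is only installed if the RNG protocol and/or the variable produced
 * some entropy.
 */
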
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
new file mode 100644
index 0000000000..674a064b8f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/efi.h>
+#include <linux/log2.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align_shift,
+ u64 alloc_limit)
+{
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+ alloc_limit);
+ if (region_end < size)
+ return 0;
+
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
+ return 0;
+
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed,
+ int memory_type,
+ unsigned long alloc_limit)
+{
+ unsigned long total_slots = 0, target_slot;
+ unsigned long total_mirrored_slots = 0;
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ int map_offset;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
+ total_mirrored_slots += slots;
+ }
+
+ /* consider only mirrored slots for randomization if any exist */
+ if (total_mirrored_slots > 0)
+ total_slots = total_mirrored_slots;
+
+ /* pick a random slot index in the range [0, total_slots) */
+ target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ status = EFI_OUT_OF_RESOURCES;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (total_mirrored_slots > 0 &&
+ !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
+ continue;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = size / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ memory_type, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_bs_call(free_pool, map);
+
+ return status;
+}
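
A worked example of the slot arithmetic, assuming a single suitable EFI_CONVENTIONAL_MEMORY region and the listed parameters; all numbers are illustrative.

/*
 * Worked example (illustrative numbers):
 *   region:      base 0x80000000, 1 GiB, entirely below alloc_limit
 *   allocation:  size 64 MiB, align 2 MiB (align_shift = 21)
 *
 *   first_slot = round_up(0x80000000, 2M)              = 0x80000000
 *   last_slot  = round_down(0xBFFFFFFF - 64M + 1, 2M)  = 0xBC000000
 *   slots      = ((0xBC000000 - 0x80000000) >> 21) + 1 = 481
 *
 * With random_seed = 0x80000000, target_slot = (481 * 0x80000000) >> 32
 * = 240, so the allocation is attempted at 0x80000000 + 240 * 2 MiB =
 * 0x9E000000 via EFI_ALLOCATE_ADDRESS.
 */
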
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
new file mode 100644
index 0000000000..bf6fbd5d22
--- /dev/null
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to be used for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the address
+ * given by @min.
+ *
+ * Return: status code
+ */
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map->map_size / map->desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map->map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, map->desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map->map_size / map->desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater than or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If the preferred address allocation failed, allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
new file mode 100644
index 0000000000..c96d6dcee8
--- /dev/null
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/efi.h>
+
+#include <asm/efi.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+
+#include "efistub.h"
+
+unsigned long stext_offset(void)
+{
+ /*
+ * When built as part of the kernel, the EFI stub cannot branch to the
+ * kernel proper via the image header, as the PE/COFF header is
+ * strictly not part of the in-memory presentation of the image, only
+ * of the file representation. So instead, we need to jump to the
+ * actual entrypoint in the .text region of the image.
+ */
+ return _start_kernel - _start;
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
+ efi_status_t status;
+
+ kernel_size = _edata - _start;
+ kernel_codesize = __init_text_end - _start;
+ kernel_memsize = kernel_size + (_end - _edata);
+ *image_addr = (unsigned long)_start;
+ *image_size = kernel_memsize;
+ *reserve_size = *image_size;
+
+ status = efi_kaslr_relocate_kernel(image_addr,
+ reserve_addr, reserve_size,
+ kernel_size, kernel_codesize, kernel_memsize,
+ efi_kaslr_get_phys_seed(image_handle));
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *image_size = 0;
+ }
+
+ return status;
+}
+
+void efi_icache_sync(unsigned long start, unsigned long end)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
diff --git a/drivers/firmware/efi/libstub/riscv.c b/drivers/firmware/efi/libstub/riscv.c
new file mode 100644
index 0000000000..8022b104c3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/riscv.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+
+#include <asm/efi.h>
+#include <asm/unaligned.h>
+
+#include "efistub.h"
+
+typedef void __noreturn (*jump_kernel_func)(unsigned long, unsigned long);
+
+static unsigned long hartid;
+
+static int get_boot_hartid_from_fdt(void)
+{
+ const void *fdt;
+ int chosen_node, len;
+ const void *prop;
+
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
+ if (!fdt)
+ return -EINVAL;
+
+ chosen_node = fdt_path_offset(fdt, "/chosen");
+ if (chosen_node < 0)
+ return -EINVAL;
+
+ prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
+ if (!prop)
+ return -EINVAL;
+
+ if (len == sizeof(u32))
+ hartid = (unsigned long) fdt32_to_cpu(*(fdt32_t *)prop);
+ else if (len == sizeof(u64))
+ hartid = (unsigned long) fdt64_to_cpu(__get_unaligned_t(fdt64_t, prop));
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static efi_status_t get_boot_hartid_from_efi(void)
+{
+ efi_guid_t boot_protocol_guid = RISCV_EFI_BOOT_PROTOCOL_GUID;
+ struct riscv_efi_boot_protocol *boot_protocol;
+ efi_status_t status;
+
+ status = efi_bs_call(locate_protocol, &boot_protocol_guid, NULL,
+ (void **)&boot_protocol);
+ if (status != EFI_SUCCESS)
+ return status;
+ return efi_call_proto(boot_protocol, get_boot_hartid, &hartid);
+}
+
+efi_status_t check_platform_features(void)
+{
+ efi_status_t status;
+ int ret;
+
+ status = get_boot_hartid_from_efi();
+ if (status != EFI_SUCCESS) {
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
+ efi_err("Failed to get boot hartid!\n");
+ return EFI_UNSUPPORTED;
+ }
+ }
+ return EFI_SUCCESS;
+}
+
+unsigned long __weak stext_offset(void)
+{
+ /*
+ * This fallback definition is used by the EFI zboot stub, which loads
+ * the entire image so it can branch via the image header at offset #0.
+ */
+ return 0;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint, unsigned long fdt,
+ unsigned long fdt_size)
+{
+ unsigned long kernel_entry = entrypoint + stext_offset();
+ jump_kernel_func jump_kernel = (jump_kernel_func)kernel_entry;
+
+ /*
+ * Jump to the real kernel here with the following constraints:
+ * 1. MMU should be disabled.
+ * 2. a0 should contain the hartid.
+ * 3. a1 should contain the DT address.
+ */
+ csr_write(CSR_SATP, 0);
+ jump_kernel(hartid, fdt);
+}
diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
new file mode 100644
index 0000000000..a51ec201ca
--- /dev/null
+++ b/drivers/firmware/efi/libstub/screen_info.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * There are two ways of populating the core kernel's struct screen_info via the stub:
+ * - using a configuration table, like below, which relies on the EFI init code
+ * to locate the table and copy the contents;
+ * - by linking directly to the core kernel's copy of the global symbol.
+ *
+ * The latter is preferred because it makes the EFIFB earlycon available very
+ * early, but it only works if the EFI stub is part of the core kernel image
+ * itself. The zboot decompressor can only use the configuration table
+ * approach.
+ */
+
+static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *__alloc_screen_info(void)
+{
+ struct screen_info *si;
+ efi_status_t status;
+
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ sizeof(*si), (void **)&si);
+
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
+ if (status == EFI_SUCCESS)
+ return si;
+
+ efi_bs_call(free_pool, si);
+ return NULL;
+}
+
+void free_screen_info(struct screen_info *si)
+{
+ if (!si)
+ return;
+
+ efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+ efi_bs_call(free_pool, si);
+}
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
new file mode 100644
index 0000000000..516f4f0069
--- /dev/null
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Secure boot handling.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ * Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Mark Salter <msalter@redhat.com>
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/* SHIM variables */
+static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
+
+static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data)
+{
+ return get_efi_var(name, vendor, attr, data_size, data);
+}
+
+/*
+ * Determine whether we're in secure boot mode.
+ */
+enum efi_secureboot_mode efi_get_secureboot(void)
+{
+ u32 attr;
+ unsigned long size;
+ enum efi_secureboot_mode mode;
+ efi_status_t status;
+ u8 moksbstate;
+
+ mode = efi_get_secureboot_mode(get_var);
+ if (mode == efi_secureboot_mode_unknown) {
+ efi_err("Could not determine UEFI Secure Boot status.\n");
+ return efi_secureboot_mode_unknown;
+ }
+ if (mode != efi_secureboot_mode_enabled)
+ return mode;
+
+ /*
+ * See if a user has put the shim into insecure mode. If so, and if the
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
+ */
+ size = sizeof(moksbstate);
+ status = get_efi_var(shim_MokSBState_name, &shim_guid,
+ &attr, &size, &moksbstate);
+
+ /* If it fails, we don't care why. Default to secure */
+ if (status != EFI_SUCCESS)
+ goto secure_boot_enabled;
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
+ return efi_secureboot_mode_disabled;
+
+secure_boot_enabled:
+ efi_info("UEFI Secure Boot is enabled.\n");
+ return efi_secureboot_mode_enabled;
+}
diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c
new file mode 100644
index 0000000000..159fb4e456
--- /dev/null
+++ b/drivers/firmware/efi/libstub/skip_spaces.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
new file mode 100644
index 0000000000..c217de2cc8
--- /dev/null
+++ b/drivers/firmware/efi/libstub/smbios.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2022 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/efi.h>
+
+#include "efistub.h"
+
+typedef struct efi_smbios_protocol efi_smbios_protocol_t;
+
+struct efi_smbios_protocol {
+ efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t,
+ u16 *, struct efi_smbios_record *);
+ efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *,
+ unsigned long *, u8 *);
+ efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16);
+ efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *,
+ struct efi_smbios_record **,
+ efi_handle_t *);
+
+ u8 major_version;
+ u8 minor_version;
+};
+
+const struct efi_smbios_record *efi_get_smbios_record(u8 type)
+{
+ struct efi_smbios_record *record;
+ efi_smbios_protocol_t *smbios;
+ efi_status_t status;
+ u16 handle = 0xfffe;
+
+ status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
+ (void **)&smbios) ?:
+ efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
+ if (status != EFI_SUCCESS)
+ return NULL;
+ return record;
+}
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+ u8 type, int offset)
+{
+ const u8 *strtable;
+
+ if (!record)
+ return NULL;
+
+ strtable = (u8 *)record + record->length;
+ for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+ int len = strlen(strtable);
+
+ if (!len)
+ return NULL;
+ strtable += len + 1;
+ }
+ return strtable;
+}
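
As a usage sketch, the two helpers above can be combined to pull a string out of a record. The example assumes the SMBIOS type 1 (System Information) layout, where offset 0x04 holds the manufacturer string index; the function name is hypothetical.

/* Hypothetical helper: manufacturer string from the type 1 record. */
static const u8 *example_smbios_manufacturer(void)
{
        const struct efi_smbios_record *record = efi_get_smbios_record(1);

        /* offset 0x04 of a type 1 record holds the manufacturer string index */
        return __efi_get_smbios_string(record, 1, 0x04);
}
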
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
new file mode 100644
index 0000000000..168fe8e79a
--- /dev/null
+++ b/drivers/firmware/efi/libstub/string.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Taken from:
+ * linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifndef EFI_HAVE_STRLEN
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ */
+size_t strlen(const char *s)
+{
+ const char *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+#endif
+
+#ifndef EFI_HAVE_STRNLEN
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @count: The maximum number of bytes to search
+ */
+size_t strnlen(const char *s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+#endif
+
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+ size_t l1, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ l1 = strlen(s1);
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+
+#ifndef EFI_HAVE_STRCMP
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ */
+int strcmp(const char *cs, const char *ct)
+{
+ unsigned char c1, c2;
+
+ while (1) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ }
+ return 0;
+}
+#endif
+
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+static unsigned int simple_guess_base(const char *cp)
+{
+ if (cp[0] == '0') {
+ if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+ return 16;
+ else
+ return 8;
+ } else {
+ return 10;
+ }
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long long result = 0;
+
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoull(cp + 1, endp, base);
+
+ return simple_strtoull(cp, endp, base);
+}
+
+#ifdef CONFIG_EFI_PARAMS_FROM_FDT
+#ifndef EFI_HAVE_STRRCHR
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char *strrchr(const char *s, int c)
+{
+ const char *last = NULL;
+ do {
+ if (*s == (char)c)
+ last = s;
+ } while (*s++);
+ return (char *)last;
+}
+#endif
+#ifndef EFI_HAVE_MEMCHR
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+ const unsigned char *p = s;
+ while (n-- != 0) {
+ if ((unsigned char)c == *p++) {
+ return (void *)(p - 1);
+ }
+ }
+ return NULL;
+}
+#endif
+#endif
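
The base auto-detection in simple_strtoull() follows the usual kernel semantics; a short illustration follows, with the size-suffix handling being purely hypothetical.

/* Illustration: "0x1f0" -> 496, "0755" -> 493, "42" -> 42 when base == 0. */
static unsigned long long example_parse_size(const char *s)
{
        char *end;
        unsigned long long v = simple_strtoull(s, &end, 0);

        if (*end == 'M')        /* hypothetical "M" suffix */
                v <<= 20;
        return v;
}
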
diff --git a/drivers/firmware/efi/libstub/systable.c b/drivers/firmware/efi/libstub/systable.c
new file mode 100644
index 0000000000..91d016b02f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/systable.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+const efi_system_table_t *efi_system_table;
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
new file mode 100644
index 0000000000..7acbac16ea
--- /dev/null
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPM handling.
+ *
+ * Copyright (C) 2016 CoreOS, Inc
+ * Copyright (C) 2017 Google, Inc.
+ * Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
+ L"MemoryOverwriteRequestControl";
+
+#define MEMORY_ONLY_RESET_CONTROL_GUID \
+ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
+
+/*
+ * Enable reboot attack mitigation. This requests that the firmware clear the
+ * RAM on next reboot before proceeding with boot, ensuring that any secrets
+ * are cleared. If userland has ensured that all secrets have been removed
+ * from RAM before reboot it can simply reset this variable.
+ */
+void efi_enable_reset_attack_mitigation(void)
+{
+ u8 val = 1;
+ efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
+ efi_status_t status;
+ unsigned long datasize = 0;
+
+ status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ NULL, &datasize, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ return;
+
+ set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
+}
+
+#endif
+
+void efi_retrieve_tpm2_eventlog(void)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
+ struct linux_efi_tpm_eventlog *log_tbl = NULL;
+ struct efi_tcg2_final_events_table *final_events_table = NULL;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ efi_tcg2_protocol_t *tcg2_protocol = NULL;
+ int final_events_size = 0;
+
+ status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+ (void **)&tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry, &truncated);
+
+ if (status != EFI_SUCCESS || !log_location) {
+ version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
+ if (status != EFI_SUCCESS || !log_location)
+ return;
+
+ }
+
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ /*
+ * The TCG2 log format has variable length entries,
+ * and the information to decode the hash algorithms
+ * back into a size is contained in the first entry -
+ * pass a pointer to the final entry (to calculate its
+ * size) and the first entry (so we know how long each
+ * digest is)
+ */
+ last_entry_size =
+ __calc_tpm2_event_size((void *)last_entry_addr,
+ (void *)(long)log_location,
+ false);
+ } else {
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ }
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for event log\n");
+ return;
+ }
+
+ /*
+ * Figure out whether any events have already been logged to the
+ * final events structure, and if so how much space they take up
+ */
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (final_events_table && final_events_table->nr_events) {
+ struct tcg_pcr_event2_head *header;
+ int offset;
+ void *data;
+ int event_size;
+ int i = final_events_table->nr_events;
+
+ data = (void *)final_events_table;
+ offset = sizeof(final_events_table->version) +
+ sizeof(final_events_table->nr_events);
+
+ while (i > 0) {
+ header = data + offset + final_events_size;
+ event_size = __calc_tpm2_event_size(header,
+ (void *)(long)log_location,
+ false);
+ final_events_size += event_size;
+ i--;
+ }
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->final_events_preboot_size = final_events_size;
+ log_tbl->version = version;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_bs_call(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_bs_call(free_pool, log_tbl);
+}
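
The log-size derivation above is easiest to see with concrete numbers; the addresses below are illustrative only.

/*
 * Illustrative size derivation: the protocol reports only the first and
 * the last entry, so with
 *   log_location   = 0x1000  (first entry)
 *   log_last_entry = 0x1800  (last entry)
 * and a computed last_entry_size of 0x40 bytes,
 *   log_size = 0x1800 - 0x1000 + 0x40 = 0x840 bytes are copied.
 */
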
diff --git a/drivers/firmware/efi/libstub/unaccepted_memory.c b/drivers/firmware/efi/libstub/unaccepted_memory.c
new file mode 100644
index 0000000000..9a655f30ba
--- /dev/null
+++ b/drivers/firmware/efi/libstub/unaccepted_memory.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include "efistub.h"
+
+struct efi_unaccepted_memory *unaccepted_table;
+
+efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
+ struct efi_boot_memmap *map)
+{
+ efi_guid_t unaccepted_table_guid = LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID;
+ u64 unaccepted_start = ULLONG_MAX, unaccepted_end = 0, bitmap_size;
+ efi_status_t status;
+ int i;
+
+ /* Check if the table is already installed */
+ unaccepted_table = get_efi_config_table(unaccepted_table_guid);
+ if (unaccepted_table) {
+ if (unaccepted_table->version != 1) {
+ efi_err("Unknown version of unaccepted memory table\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+ }
+
+ /* Check if there's any unaccepted memory and find the max address */
+ for (i = 0; i < nr_desc; i++) {
+ efi_memory_desc_t *d;
+ unsigned long m = (unsigned long)map->map;
+
+ d = efi_early_memdesc_ptr(m, map->desc_size, i);
+ if (d->type != EFI_UNACCEPTED_MEMORY)
+ continue;
+
+ unaccepted_start = min(unaccepted_start, d->phys_addr);
+ unaccepted_end = max(unaccepted_end,
+ d->phys_addr + d->num_pages * PAGE_SIZE);
+ }
+
+ if (unaccepted_start == ULLONG_MAX)
+ return EFI_SUCCESS;
+
+ unaccepted_start = round_down(unaccepted_start,
+ EFI_UNACCEPTED_UNIT_SIZE);
+ unaccepted_end = round_up(unaccepted_end, EFI_UNACCEPTED_UNIT_SIZE);
+
+ /*
+ * If unaccepted memory is present, allocate a bitmap to track what
+ * memory has to be accepted before access.
+ *
+ * One bit in the bitmap represents 2MiB in the address space:
+ * A 4k bitmap can track 64GiB of physical address space.
+ *
+ * In the worst-case scenario -- a huge hole in the middle of the
+ * address space -- it needs 256MiB to handle 4PiB of the address
+ * space.
+ *
+ * The bitmap will be populated in setup_e820() according to the memory
+ * map after efi_exit_boot_services().
+ */
+ bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
+ EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);
+
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ sizeof(*unaccepted_table) + bitmap_size,
+ (void **)&unaccepted_table);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate unaccepted memory config table\n");
+ return status;
+ }
+
+ unaccepted_table->version = 1;
+ unaccepted_table->unit_size = EFI_UNACCEPTED_UNIT_SIZE;
+ unaccepted_table->phys_base = unaccepted_start;
+ unaccepted_table->size = bitmap_size;
+ memset(unaccepted_table->bitmap, 0, bitmap_size);
+
+ status = efi_bs_call(install_configuration_table,
+ &unaccepted_table_guid, unaccepted_table);
+ if (status != EFI_SUCCESS) {
+ efi_bs_call(free_pool, unaccepted_table);
+ efi_err("Failed to install unaccepted memory config table!\n");
+ }
+
+ return status;
+}
+
+/*
+ * The accepted memory bitmap only works at unit_size granularity. Take
+ * unaligned start/end addresses and either:
+ * 1. Accept the memory immediately and in its entirety
+ * 2. Accept unaligned parts, and mark *some* aligned part unaccepted
+ *
+ * The function never calls bitmap_set() with zero bits to set.
+ */
+void process_unaccepted_memory(u64 start, u64 end)
+{
+ u64 unit_size = unaccepted_table->unit_size;
+ u64 unit_mask = unaccepted_table->unit_size - 1;
+ u64 bitmap_size = unaccepted_table->size;
+
+ /*
+ * Ensure that at least one bit will be set in the bitmap by
+ * immediately accepting all regions under 2*unit_size. This is
+	 * imprecise and may immediately accept some areas that could
+	 * have been represented in the bitmap, but it results in simpler
+	 * code below.
+ *
+ * Consider case like this (assuming unit_size == 2MB):
+ *
+ * | 4k | 2044k | 2048k |
+ * ^ 0x0 ^ 2MB ^ 4MB
+ *
+ * Only the first 4k has been accepted. The 0MB->2MB region can not be
+ * represented in the bitmap. The 2MB->4MB region can be represented in
+ * the bitmap. But, the 0MB->4MB region is <2*unit_size and will be
+ * immediately accepted in its entirety.
+ */
+ if (end - start < 2 * unit_size) {
+ arch_accept_memory(start, end);
+ return;
+ }
+
+ /*
+ * No matter how the start and end are aligned, at least one unaccepted
+ * unit_size area will remain to be marked in the bitmap.
+ */
+
+ /* Immediately accept a <unit_size piece at the start: */
+ if (start & unit_mask) {
+ arch_accept_memory(start, round_up(start, unit_size));
+ start = round_up(start, unit_size);
+ }
+
+ /* Immediately accept a <unit_size piece at the end: */
+ if (end & unit_mask) {
+ arch_accept_memory(round_down(end, unit_size), end);
+ end = round_down(end, unit_size);
+ }
+
+ /*
+	 * Accept the part of the range that lies before phys_base and
+	 * cannot be recorded in the bitmap.
+ */
+ if (start < unaccepted_table->phys_base) {
+ arch_accept_memory(start,
+ min(unaccepted_table->phys_base, end));
+ start = unaccepted_table->phys_base;
+ }
+
+ /* Nothing to record */
+ if (end < unaccepted_table->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted_table->phys_base;
+ end -= unaccepted_table->phys_base;
+
+ /* Accept memory that doesn't fit into bitmap */
+ if (end > bitmap_size * unit_size * BITS_PER_BYTE) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = bitmap_size * unit_size * BITS_PER_BYTE +
+ unaccepted_table->phys_base;
+ phys_end = end + unaccepted_table->phys_base;
+
+ arch_accept_memory(phys_start, phys_end);
+ end = bitmap_size * unit_size * BITS_PER_BYTE;
+ }
+
+ /*
+ * 'start' and 'end' are now both unit_size-aligned.
+ * Record the range as being unaccepted:
+ */
+ bitmap_set(unaccepted_table->bitmap,
+ start / unit_size, (end - start) / unit_size);
+}
+
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long range_start, range_end;
+ unsigned long bitmap_size;
+ u64 unit_size;
+
+ if (!unaccepted_table)
+ return;
+
+ unit_size = unaccepted_table->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted_table->phys_base)
+ start = unaccepted_table->phys_base;
+ if (end < unaccepted_table->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted_table->phys_base;
+ end -= unaccepted_table->phys_base;
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted_table->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted_table->size * unit_size * BITS_PER_BYTE;
+
+ range_start = start / unit_size;
+ bitmap_size = DIV_ROUND_UP(end, unit_size);
+
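+	/*
+	 * Walk each contiguous run of set (i.e. still unaccepted) bits,
+	 * accept the corresponding physical range and clear the bits so the
+	 * same range is not accepted twice.
+	 */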
+ for_each_set_bitrange_from(range_start, range_end,
+ unaccepted_table->bitmap, bitmap_size) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = range_start * unit_size + unaccepted_table->phys_base;
+ phys_end = range_end * unit_size + unaccepted_table->phys_base;
+
+ arch_accept_memory(phys_start, phys_end);
+ bitmap_clear(unaccepted_table->bitmap,
+ range_start, range_end - range_start);
+ }
+}
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
new file mode 100644
index 0000000000..71c71c2223
--- /dev/null
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright 2007 rPath, Inc. - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * Oh, it's a waste of space, but oh-so-yummy for debugging.
+ */
+
+#include <linux/stdarg.h>
+
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static
+int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ while (isdigit(**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+/*
+ * put_dec_full4 handles numbers in the range 0 <= r < 10000.
+ * The multiplier 0xccd is round(2^15/10), and the approximation
+ * r/10 == (r * 0xccd) >> 15 is exact for all r < 16389.
+ */
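+/*
+ * For illustration: put_dec_full4(end, 1234) writes '4', '3', '2' and '1'
+ * into end[-1], end[-2], end[-3] and end[-4], i.e. the digits are emitted
+ * back to front.
+ */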
+static
+void put_dec_full4(char *end, unsigned int r)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ unsigned int q = (r * 0xccd) >> 15;
+ *--end = '0' + (r - q * 10);
+ r = q;
+ }
+ *--end = '0' + r;
+}
+
+/* put_dec is copied from lib/vsprintf.c with small modifications */
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (second call in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned int put_dec_helper4(char *end, unsigned int x)
+{
+ unsigned int q = (x * 0x346DC5D7ULL) >> 43;
+
+ put_dec_full4(end, x - q * 10000);
+ return q;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *end, unsigned long long n)
+{
+ unsigned int d3, d2, d1, q, h;
+ char *p = end;
+
+ d1 = ((unsigned int)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((unsigned int)n & 0xffff);
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 281 * d3;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ put_dec_full4(p, q);
+ p -= 4;
+
+ /* strip off the extra 0's we printed */
+ while (p < end && *p == '0')
+ ++p;
+
+ return p;
+}
+
+static
+char *number(char *end, unsigned long long num, int base, char locase)
+{
+ /*
+ * locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces same digits or (maybe lowercased) letters
+ */
+
+ /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+ switch (base) {
+ case 10:
+ if (num != 0)
+ end = put_dec(end, num);
+ break;
+ case 8:
+ for (; num != 0; num >>= 3)
+ *--end = '0' + (num & 07);
+ break;
+ case 16:
+ for (; num != 0; num >>= 4)
+ *--end = digits[num & 0xf] | locase;
+ break;
+ default:
+ unreachable();
+ }
+
+ return end;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SMALL 32 /* Must be 32 == 0x20 */
+#define SPECIAL 64 /* 0x */
+#define WIDE 128 /* UTF-16 string */
+
+static
+int get_flags(const char **fmt)
+{
+ int flags = 0;
+
+ do {
+ switch (**fmt) {
+ case '-':
+ flags |= LEFT;
+ break;
+ case '+':
+ flags |= PLUS;
+ break;
+ case ' ':
+ flags |= SPACE;
+ break;
+ case '#':
+ flags |= SPECIAL;
+ break;
+ case '0':
+ flags |= ZEROPAD;
+ break;
+ default:
+ return flags;
+ }
+ ++(*fmt);
+ } while (1);
+}
+
+static
+int get_int(const char **fmt, va_list *ap)
+{
+ if (isdigit(**fmt))
+ return skip_atoi(fmt);
+ if (**fmt == '*') {
+ ++(*fmt);
+ /* it's the next argument */
+ return va_arg(*ap, int);
+ }
+ return 0;
+}
+
+static
+unsigned long long get_number(int sign, int qualifier, va_list *ap)
+{
+ if (sign) {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, long long);
+ case 'l':
+ return va_arg(*ap, long);
+ case 'h':
+ return (short)va_arg(*ap, int);
+ case 'H':
+ return (signed char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, int);
+ };
+ } else {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, unsigned long long);
+ case 'l':
+ return va_arg(*ap, unsigned long);
+ case 'h':
+ return (unsigned short)va_arg(*ap, int);
+ case 'H':
+ return (unsigned char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, unsigned int);
+ }
+ }
+}
+
+static
+char get_sign(long long *num, int flags)
+{
+ if (!(flags & SIGN))
+ return 0;
+ if (*num < 0) {
+ *num = -(*num);
+ return '-';
+ }
+ if (flags & PLUS)
+ return '+';
+ if (flags & SPACE)
+ return ' ';
+ return 0;
+}
+
+static
+size_t utf16s_utf8nlen(const u16 *s16, size_t maxlen)
+{
+ size_t len, clen;
+
+ for (len = 0; len < maxlen && *s16; len += clen) {
+ u16 c0 = *s16++;
+
+ /* First, get the length for a BMP character */
+ clen = 1 + (c0 >= 0x80) + (c0 >= 0x800);
+ if (len + clen > maxlen)
+ break;
+ /*
+ * If this is a high surrogate, and we're already at maxlen, we
+ * can't include the character if it's a valid surrogate pair.
+ * Avoid accessing one extra word just to check if it's valid
+ * or not.
+ */
+ if ((c0 & 0xfc00) == 0xd800) {
+ if (len + clen == maxlen)
+ break;
+ if ((*s16 & 0xfc00) == 0xdc00) {
+ ++s16;
+ ++clen;
+ }
+ }
+ }
+
+ return len;
+}
+
+static
+u32 utf16_to_utf32(const u16 **s16)
+{
+ u16 c0, c1;
+
+ c0 = *(*s16)++;
+ /* not a surrogate */
+ if ((c0 & 0xf800) != 0xd800)
+ return c0;
+ /* invalid: low surrogate instead of high */
+ if (c0 & 0x0400)
+ return 0xfffd;
+ c1 = **s16;
+ /* invalid: missing low surrogate */
+ if ((c1 & 0xfc00) != 0xdc00)
+ return 0xfffd;
+ /* valid surrogate pair */
+ ++(*s16);
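+	/* equivalent to 0x10000 + ((c0 - 0xd800) << 10) + (c1 - 0xdc00) */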
+ return (0x10000 - (0xd800 << 10) - 0xdc00) + (c0 << 10) + c1;
+}
+
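+/*
+ * Emit one byte: store it only if it fits in the buffer, but always advance
+ * 'pos' so the full untruncated length is returned, as snprintf requires.
+ */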
+#define PUTC(c) \
+do { \
+ if (pos < size) \
+ buf[pos] = (c); \
+ ++pos; \
+} while (0);
+
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ /* The maximum space required is to print a 64-bit number in octal */
+ char tmp[(sizeof(unsigned long long) * 8 + 2) / 3];
+ char *tmp_end = &tmp[ARRAY_SIZE(tmp)];
+ long long num;
+ int base;
+ const char *s;
+ size_t len, pos;
+ char sign;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+				   number of chars from string */
+ int qualifier; /* 'h', 'hh', 'l' or 'll' for integer fields */
+
+ va_list args;
+
+ /*
+ * We want to pass our input va_list to helper functions by reference,
+ * but there's an annoying edge case. If va_list was originally passed
+ * to us by value, we could just pass &ap down to the helpers. This is
+ * the case on, for example, X86_32.
+ * However, on X86_64 (and possibly others), va_list is actually a
+ * size-1 array containing a structure. Our function parameter ap has
+ * decayed from T[1] to T*, and &ap has type T** rather than T(*)[1],
+ * which is what will be expected by a function taking a va_list *
+ * parameter.
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then passing a pointer to that local
+ * copy instead, which is what we do here.
+ */
+ va_copy(args, ap);
+
+ for (pos = 0; *fmt; ++fmt) {
+ if (*fmt != '%' || *++fmt == '%') {
+ PUTC(*fmt);
+ continue;
+ }
+
+ /* process flags */
+ flags = get_flags(&fmt);
+
+ /* get field width */
+ field_width = get_int(&fmt, &args);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+
+ if (flags & LEFT)
+ flags &= ~ZEROPAD;
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ precision = get_int(&fmt, &args);
+ if (precision >= 0)
+ flags &= ~ZEROPAD;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == *fmt) {
+ qualifier -= 'a'-'A';
+ ++fmt;
+ }
+ }
+
+ sign = 0;
+
+ switch (*fmt) {
+ case 'c':
+ flags &= LEFT;
+ s = tmp;
+ if (qualifier == 'l') {
+ ((u16 *)tmp)[0] = (u16)va_arg(args, unsigned int);
+ ((u16 *)tmp)[1] = L'\0';
+ precision = INT_MAX;
+ goto wstring;
+ } else {
+ tmp[0] = (unsigned char)va_arg(args, int);
+ precision = len = 1;
+ }
+ goto output;
+
+ case 's':
+ flags &= LEFT;
+ if (precision < 0)
+ precision = INT_MAX;
+ s = va_arg(args, void *);
+ if (!s)
+ s = precision < 6 ? "" : "(null)";
+ else if (qualifier == 'l') {
+ wstring:
+ flags |= WIDE;
+ precision = len = utf16s_utf8nlen((const u16 *)s, precision);
+ goto output;
+ }
+ precision = len = strnlen(s, precision);
+ goto output;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'p':
+ if (precision < 0)
+ precision = 2 * sizeof(void *);
+ fallthrough;
+ case 'x':
+ flags |= SMALL;
+ fallthrough;
+ case 'X':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ fallthrough;
+ case 'u':
+ flags &= ~SPECIAL;
+ base = 10;
+ break;
+
+ default:
+ /*
+ * Bail out if the conversion specifier is invalid.
+ * There's probably a typo in the format string and the
+ * remaining specifiers are unlikely to match up with
+ * the arguments.
+ */
+ goto fail;
+ }
+ if (*fmt == 'p') {
+ num = (unsigned long)va_arg(args, void *);
+ } else {
+ num = get_number(flags & SIGN, qualifier, &args);
+ }
+
+ sign = get_sign(&num, flags);
+ if (sign)
+ --field_width;
+
+ s = number(tmp_end, num, base, flags & SMALL);
+ len = tmp_end - s;
+ /* default precision is 1 */
+ if (precision < 0)
+ precision = 1;
+ /* precision is minimum number of digits to print */
+ if (precision < len)
+ precision = len;
+ if (flags & SPECIAL) {
+ /*
+ * For octal, a leading 0 is printed only if necessary,
+ * i.e. if it's not already there because of the
+ * precision.
+ */
+ if (base == 8 && precision == len)
+ ++precision;
+ /*
+ * For hexadecimal, the leading 0x is skipped if the
+ * output is empty, i.e. both the number and the
+ * precision are 0.
+ */
+ if (base == 16 && precision > 0)
+ field_width -= 2;
+ else
+ flags &= ~SPECIAL;
+ }
+ /*
+ * For zero padding, increase the precision to fill the field
+ * width.
+ */
+ if ((flags & ZEROPAD) && field_width > precision)
+ precision = field_width;
+
+output:
+ /* Calculate the padding necessary */
+ field_width -= precision;
+ /* Leading padding with ' ' */
+ if (!(flags & LEFT))
+ while (field_width-- > 0)
+ PUTC(' ');
+ /* sign */
+ if (sign)
+ PUTC(sign);
+ /* 0x/0X for hexadecimal */
+ if (flags & SPECIAL) {
+ PUTC('0');
+ PUTC( 'X' | (flags & SMALL));
+ }
+ /* Zero padding and excess precision */
+ while (precision-- > len)
+ PUTC('0');
+ /* Actual output */
+ if (flags & WIDE) {
+ const u16 *ws = (const u16 *)s;
+
+ while (len-- > 0) {
+ u32 c32 = utf16_to_utf32(&ws);
+ u8 *s8;
+ size_t clen;
+
+ if (c32 < 0x80) {
+ PUTC(c32);
+ continue;
+ }
+
+ /* Number of trailing octets */
+ clen = 1 + (c32 >= 0x800) + (c32 >= 0x10000);
+
+ len -= clen;
+ s8 = (u8 *)&buf[pos];
+
+ /* Avoid writing partial character */
+ PUTC('\0');
+ pos += clen;
+ if (pos >= size)
+ continue;
+
+ /* Set high bits of leading octet */
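+				/* truncates to u8: 0xc0, 0xe0 or 0xf0 for clen 1, 2 or 3 */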
+ *s8 = (0xf00 >> 1) >> clen;
+ /* Write trailing octets in reverse order */
+ for (s8 += clen; clen; --clen, c32 >>= 6)
+ *s8-- = 0x80 | (c32 & 0x3f);
+ /* Set low bits of leading octet */
+ *s8 |= c32;
+ }
+ } else {
+ while (len-- > 0)
+ PUTC(*s++);
+ }
+ /* Trailing padding with ' ' */
+ while (field_width-- > 0)
+ PUTC(' ');
+ }
+fail:
+ va_end(args);
+
+ if (size)
+ buf[min(pos, size-1)] = '\0';
+
+ return pos;
+}
+
+int snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return i;
+}
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
new file mode 100644
index 0000000000..479dd445ac
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/efi.h>
+
+#include <asm/boot.h>
+#include <asm/desc.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+#include "x86-stub.h"
+
+bool efi_no5lvl;
+
+static void (*la57_toggle)(void *cr3);
+
+static const struct desc_struct gdt[] = {
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+};
+
+/*
+ * Enabling (or disabling) 5 level paging is tricky, because it can only be
+ * done from 32-bit mode with paging disabled. This means not only that the
+ * code itself must be running from 32-bit addressable physical memory, but
+ * also that the root page table must be 32-bit addressable, as programming
+ * a 64-bit value into CR3 when running in 32-bit mode is not supported.
+ */
+efi_status_t efi_setup_5level_paging(void)
+{
+ u8 tmpl_size = (u8 *)&trampoline_ljmp_imm_offset - (u8 *)&trampoline_32bit_src;
+ efi_status_t status;
+ u8 *la57_code;
+
+ if (!efi_is_64bit())
+ return EFI_SUCCESS;
+
+ /* check for 5 level paging support */
+ if (native_cpuid_eax(0) < 7 ||
+ !(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
+ return EFI_SUCCESS;
+
+ /* allocate some 32-bit addressable memory for code and a page table */
+ status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
+ U32_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
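+	/*
+	 * Copy the 32-bit trampoline code and pad the remainder of the page
+	 * with NOPs (0x90).
+	 */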
+ la57_toggle = memcpy(la57_code, trampoline_32bit_src, tmpl_size);
+ memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
+
+ /*
+ * To avoid the need to allocate a 32-bit addressable stack, the
+ * trampoline uses a LJMP instruction to switch back to long mode.
+ * LJMP takes an absolute destination address, which needs to be
+ * fixed up at runtime.
+ */
+ *(u32 *)&la57_code[trampoline_ljmp_imm_offset] += (unsigned long)la57_code;
+
+ efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
+
+ return EFI_SUCCESS;
+}
+
+void efi_5level_switch(void)
+{
+ bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool have_la57 = native_read_cr4() & X86_CR4_LA57;
+ bool need_toggle = want_la57 ^ have_la57;
+ u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
+ u64 *cr3 = (u64 *)__native_read_cr3();
+ u64 *new_cr3;
+
+ if (!la57_toggle || !need_toggle)
+ return;
+
+ if (!have_la57) {
+ /*
+ * 5 level paging will be enabled, so a root level page needs
+ * to be allocated from the 32-bit addressable physical region,
+ * with its first entry referring to the existing hierarchy.
+ */
+ new_cr3 = memset(pgt, 0, PAGE_SIZE);
+ new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
+ } else {
+ /* take the new root table pointer from the current entry #0 */
+ new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
+
+ /* copy the new root table if it is not 32-bit addressable */
+ if ((u64)new_cr3 > U32_MAX)
+ new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
+ }
+
+ native_load_gdt(&(struct desc_ptr){ sizeof(gdt) - 1, (u64)gdt });
+
+ la57_toggle(new_cr3);
+}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
new file mode 100644
index 0000000000..70b325a2f1
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+
+#include <asm/efi.h>
+#include <asm/e820/types.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/boot.h>
+#include <asm/kaslr.h>
+#include <asm/sev.h>
+
+#include "efistub.h"
+#include "x86-stub.h"
+
+const efi_system_table_t *efi_system_table;
+const efi_dxe_services_table_t *efi_dxe_table;
+static efi_loaded_image_t *image = NULL;
+static efi_memory_attribute_protocol_t *memattr;
+
+typedef union sev_memory_acceptance_protocol sev_memory_acceptance_protocol_t;
+union sev_memory_acceptance_protocol {
+ struct {
+ efi_status_t (__efiapi * allow_unaccepted_memory)(
+ sev_memory_acceptance_protocol_t *);
+ };
+ struct {
+ u32 allow_unaccepted_memory;
+ } mixed_mode;
+};
+
+static efi_status_t
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+{
+ struct pci_setup_rom *rom = NULL;
+ efi_status_t status;
+ unsigned long size;
+ uint64_t romsize;
+ void *romimage;
+
+ /*
+ * Some firmware images contain EFI function pointers at the place where
+ * the romimage and romsize fields are supposed to be. Typically the EFI
+ * code is mapped at high addresses, translating to an unrealistically
+ * large romsize. The UEFI spec limits the size of option ROMs to 16
+ * MiB so we reject any ROMs over 16 MiB in size to catch this.
+ */
+ romimage = efi_table_attr(pci, romimage);
+ romsize = efi_table_attr(pci, romsize);
+ if (!romimage || !romsize || romsize > SZ_16M)
+ return EFI_INVALID_PARAMETER;
+
+ size = romsize + sizeof(*rom);
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&rom);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'rom'\n");
+ return status;
+ }
+
+ memset(rom, 0, sizeof(*rom));
+
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = romsize;
+ *__rom = rom;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_VENDOR_ID, 1, &rom->vendor);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read rom->vendor\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_DEVICE_ID, 1, &rom->devid);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read rom->devid\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
+ &rom->device, &rom->function);
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, romimage, romsize);
+ return status;
+
+free_struct:
+ efi_bs_call(free_pool, rom);
+ return status;
+}
+
+/*
+ * There's no way to return an informative status from this function,
+ * because any analysis (and printing of error messages) needs to be
+ * done directly at the EFI function call-site.
+ *
+ * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we
+ * just didn't find any PCI devices, but there's no way to tell outside
+ * the context of the call.
+ */
+static void setup_efi_pci(struct boot_params *params)
+{
+ efi_status_t status;
+ void **pci_handle = NULL;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long size = 0;
+ struct setup_data *data;
+ efi_handle_t h;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&pci_handle);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+ }
+
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ for_each_efi_handle(h, pci_handle, size, i) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_bs_call(handle_protocol, h, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
+
+static void retrieve_apple_device_properties(struct boot_params *boot_params)
+{
+ efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID;
+ struct setup_data *data, *new;
+ efi_status_t status;
+ u32 size = 0;
+ apple_properties_protocol_t *p;
+
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (efi_table_attr(p, version) != 0x10000) {
+ efi_err("Unsupported properties proto version\n");
+ return;
+ }
+
+ efi_call_proto(p, get_all, NULL, &size);
+ if (!size)
+ return;
+
+ do {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ size + sizeof(struct setup_data),
+ (void **)&new);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'properties'\n");
+ return;
+ }
+
+ status = efi_call_proto(p, get_all, new->data, &size);
+
+ if (status == EFI_BUFFER_TOO_SMALL)
+ efi_bs_call(free_pool, new);
+ } while (status == EFI_BUFFER_TOO_SMALL);
+
+ new->type = SETUP_APPLE_PROPERTIES;
+ new->len = size;
+ new->next = 0;
+
+ data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+ if (!data) {
+ boot_params->hdr.setup_data = (unsigned long)new;
+ } else {
+ while (data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+ data->next = (unsigned long)new;
+ }
+}
+
+void efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size)
+{
+ efi_status_t status;
+ efi_gcd_memory_space_desc_t desc;
+ unsigned long end, next;
+ unsigned long rounded_start, rounded_end;
+ unsigned long unprotect_start, unprotect_size;
+
+ rounded_start = rounddown(start, EFI_PAGE_SIZE);
+ rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+
+ if (memattr != NULL) {
+ efi_call_proto(memattr, clear_memory_attributes, rounded_start,
+ rounded_end - rounded_start, EFI_MEMORY_XP);
+ return;
+ }
+
+ if (efi_dxe_table == NULL)
+ return;
+
+ /*
+	 * To lower the chance of running into firmware bugs, don't modify
+	 * the attributes of memory regions that are already suitable.
+ */
+
+ for (end = start + size; start < end; start = next) {
+
+ status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
+
+ if (status != EFI_SUCCESS)
+ return;
+
+ next = desc.base_address + desc.length;
+
+ /*
+ * Only system memory is suitable for trampoline/kernel image placement,
+ * so only this type of memory needs its attributes to be modified.
+ */
+
+ if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ (desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
+ continue;
+
+ unprotect_start = max(rounded_start, (unsigned long)desc.base_address);
+ unprotect_size = min(rounded_end, next) - unprotect_start;
+
+ status = efi_dxe_call(set_memory_space_attributes,
+ unprotect_start, unprotect_size,
+ EFI_MEMORY_WB);
+
+ if (status != EFI_SUCCESS) {
+ efi_warn("Unable to unprotect memory range [%08lx,%08lx]: %lx\n",
+ unprotect_start,
+ unprotect_start + unprotect_size,
+ status);
+ }
+ }
+}
+
+static void setup_unaccepted_memory(void)
+{
+ efi_guid_t mem_acceptance_proto = OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID;
+ sev_memory_acceptance_protocol_t *proto;
+ efi_status_t status;
+
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return;
+
+ /*
+	 * Enable unaccepted memory before calling exit boot services so that
+	 * the firmware does not accept all memory at ExitBootServices() time.
+ */
+ status = efi_bs_call(locate_protocol, &mem_acceptance_proto, NULL,
+ (void **)&proto);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(proto, allow_unaccepted_memory);
+ if (status != EFI_SUCCESS)
+ efi_err("Memory acceptance protocol failed\n");
+}
+
+static efi_char16_t *efistub_fw_vendor(void)
+{
+ unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
+
+ return (efi_char16_t *)vendor;
+}
+
+static const efi_char16_t apple[] = L"Apple";
+
+static void setup_quirks(struct boot_params *boot_params)
+{
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
+ !memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
+ retrieve_apple_device_properties(boot_params);
+}
+
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
+static efi_status_t
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
+{
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
+ efi_handle_t handle;
+ int i;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ height = 0;
+ width = 0;
+
+ first_uga = NULL;
+ for_each_efi_handle(handle, uga_handle, size, i) {
+ efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ u32 w, h, depth, refresh;
+ void *pciio;
+
+ status = efi_bs_call(handle_protocol, handle, uga_proto,
+ (void **)&uga);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pciio = NULL;
+ efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
+
+ status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
+ if (status == EFI_SUCCESS && (!first_uga || pciio)) {
+ width = w;
+ height = h;
+
+ /*
+ * Once we've found a UGA supporting PCIIO,
+ * don't bother looking any further.
+ */
+ if (pciio)
+ break;
+
+ first_uga = uga;
+ }
+ }
+
+ if (!width && !height)
+ goto free_handle;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
+
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+
+free_handle:
+ efi_bs_call(free_pool, uga_handle);
+
+ return status;
+}
+
+static void setup_graphics(struct boot_params *boot_params)
+{
+ efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ struct screen_info *si;
+ efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ void **uga_handle = NULL;
+
+ si = &boot_params->screen_info;
+ memset(si, 0, sizeof(*si));
+
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &graphics_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ status = efi_setup_gop(si, &graphics_proto, size);
+
+ if (status != EFI_SUCCESS) {
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &uga_proto, NULL, &size, uga_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ setup_uga(si, &uga_proto, size);
+ }
+}
+
+static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
+{
+ efi_bs_call(exit, handle, status, 0, NULL);
+ for(;;)
+ asm("hlt");
+}
+
+void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+/*
+ * Because the x86 boot code expects to be passed a boot_params, we
+ * need to create one ourselves (usually the bootloader would create
+ * one for us).
+ */
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ struct boot_params *boot_params;
+ struct setup_header *hdr;
+ void *image_base;
+ efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ int options_size = 0;
+ efi_status_t status;
+ char *cmdline_ptr;
+
+ efi_system_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_exit(handle, status);
+ }
+
+ image_base = efi_table_attr(image, image_base);
+
+ status = efi_allocate_pages(sizeof(struct boot_params),
+ (unsigned long *)&boot_params, ULONG_MAX);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate lowmem for boot params\n");
+ efi_exit(handle, status);
+ }
+
+ memset(boot_params, 0x0, sizeof(struct boot_params));
+
+ hdr = &boot_params->hdr;
+
+ /* Copy the setup header from the second sector to boot_params */
+ memcpy(&hdr->jump, image_base + 512,
+ sizeof(struct setup_header) - offsetof(struct setup_header, jump));
+
+ /*
+ * Fill out some of the header fields ourselves because the
+ * EFI firmware loader doesn't load the first sector.
+ */
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
+
+ hdr->type_of_loader = 0x21;
+
+ /* Convert unicode cmdline to ascii */
+ cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ if (!cmdline_ptr)
+ goto fail;
+
+ efi_set_u64_split((unsigned long)cmdline_ptr,
+ &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
+
+ hdr->ramdisk_image = 0;
+ hdr->ramdisk_size = 0;
+
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ /* not reached */
+
+fail:
+ efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
+
+ efi_exit(handle, status);
+}
+
+static void add_e820ext(struct boot_params *params,
+ struct setup_data *e820ext, u32 nr_entries)
+{
+ struct setup_data *data;
+
+ e820ext->type = SETUP_E820_EXT;
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->next = 0;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ if (data)
+ data->next = (unsigned long)e820ext;
+ else
+ params->hdr.setup_data = (unsigned long)e820ext;
+}
+
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
+{
+ struct boot_e820_entry *entry = params->e820_table;
+ struct efi_info *efi = &params->efi_info;
+ struct boot_e820_entry *prev = NULL;
+ u32 nr_entries;
+ u32 nr_desc;
+ int i;
+
+ nr_entries = 0;
+ nr_desc = efi->efi_memmap_size / efi->efi_memdesc_size;
+
+ for (i = 0; i < nr_desc; i++) {
+ efi_memory_desc_t *d;
+ unsigned int e820_type = 0;
+ unsigned long m = efi->efi_memmap;
+
+#ifdef CONFIG_X86_64
+ m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
+ d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i);
+ switch (d->type) {
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ case EFI_PAL_CODE:
+ e820_type = E820_TYPE_RESERVED;
+ break;
+
+ case EFI_UNUSABLE_MEMORY:
+ e820_type = E820_TYPE_UNUSABLE;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ e820_type = E820_TYPE_ACPI;
+ break;
+
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (efi_soft_reserve_enabled() &&
+ (d->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else
+ e820_type = E820_TYPE_RAM;
+ break;
+
+ case EFI_ACPI_MEMORY_NVS:
+ e820_type = E820_TYPE_NVS;
+ break;
+
+ case EFI_PERSISTENT_MEMORY:
+ e820_type = E820_TYPE_PMEM;
+ break;
+
+ case EFI_UNACCEPTED_MEMORY:
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ continue;
+ e820_type = E820_TYPE_RAM;
+ process_unaccepted_memory(d->phys_addr,
+ d->phys_addr + PAGE_SIZE * d->num_pages);
+ break;
+ default:
+ continue;
+ }
+
+ /* Merge adjacent mappings */
+ if (prev && prev->type == e820_type &&
+ (prev->addr + prev->size) == d->phys_addr) {
+ prev->size += d->num_pages << 12;
+ continue;
+ }
+
+ if (nr_entries == ARRAY_SIZE(params->e820_table)) {
+ u32 need = (nr_desc - i) * sizeof(struct e820_entry) +
+ sizeof(struct setup_data);
+
+ if (!e820ext || e820ext_size < need)
+ return EFI_BUFFER_TOO_SMALL;
+
+ /* boot_params map full, switch to e820 extended */
+ entry = (struct boot_e820_entry *)e820ext->data;
+ }
+
+ entry->addr = d->phys_addr;
+ entry->size = d->num_pages << PAGE_SHIFT;
+ entry->type = e820_type;
+ prev = entry++;
+ nr_entries++;
+ }
+
+ if (nr_entries > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table);
+
+ add_e820ext(params, e820ext, nr_e820ext);
+ nr_entries -= nr_e820ext;
+ }
+
+ params->e820_entries = (u8)nr_entries;
+
+ return EFI_SUCCESS;
+}
+
+static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(struct setup_data) +
+ sizeof(struct e820_entry) * nr_desc;
+
+ if (*e820ext) {
+ efi_bs_call(free_pool, *e820ext);
+ *e820ext = NULL;
+ *e820ext_size = 0;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)e820ext);
+ if (status == EFI_SUCCESS)
+ *e820ext_size = size;
+
+ return status;
+}
+
+static efi_status_t allocate_e820(struct boot_params *params,
+ struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ struct efi_boot_memmap *map;
+ efi_status_t status;
+ __u32 nr_desc;
+
+ status = efi_get_memory_map(&map, false);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ nr_desc = map->map_size / map->desc_size;
+ if (nr_desc > ARRAY_SIZE(params->e820_table) - EFI_MMAP_NR_SLACK_SLOTS) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table) +
+ EFI_MMAP_NR_SLACK_SLOTS;
+
+ status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ }
+
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
+ status = allocate_unaccepted_bitmap(nr_desc, map);
+
+ efi_bs_call(free_pool, map);
+ return status;
+}
+
+struct exit_boot_struct {
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
+ void *priv)
+{
+ const char *signature;
+ struct exit_boot_struct *p = priv;
+
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
+ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
+
+ efi_set_u64_split((unsigned long)efi_system_table,
+ &p->efi->efi_systab, &p->efi->efi_systab_hi);
+ p->efi->efi_memdesc_size = map->desc_size;
+ p->efi->efi_memdesc_version = map->desc_ver;
+ efi_set_u64_split((unsigned long)map->map,
+ &p->efi->efi_memmap, &p->efi->efi_memmap_hi);
+ p->efi->efi_memmap_size = map->map_size;
+
+ return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
+{
+ struct setup_data *e820ext = NULL;
+ __u32 e820ext_size = 0;
+ efi_status_t status;
+ struct exit_boot_struct priv;
+
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+
+ status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Might as well exit boot services now */
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+ status = setup_e820(boot_params, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return EFI_SUCCESS;
+}
+
+static bool have_unsupported_snp_features(void)
+{
+ u64 unsupported;
+
+ unsupported = snp_get_unsupported_features(sev_get_status());
+ if (unsupported) {
+ efi_err("Unsupported SEV-SNP features detected: 0x%llx\n",
+ unsupported);
+ return true;
+ }
+ return false;
+}
+
+static void efi_get_seed(void *seed, int size)
+{
+ efi_get_random_bytes(size, seed);
+
+ /*
+ * This only updates seed[0] when running on 32-bit, but in that case,
+ * seed[1] is not used anyway, as there is no virtual KASLR on 32-bit.
+ */
+ *(unsigned long *)seed ^= kaslr_get_random_long("EFI");
+}
+
+static void error(char *str)
+{
+ efi_warn("Decompression failed: %s\n", str);
+}
+
+static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+{
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ unsigned long addr, alloc_size, entry;
+ efi_status_t status;
+ u32 seed[2] = {};
+
+ /* determine the required size of the allocation */
+ alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
+ MIN_KERNEL_ALIGN);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
+ static const efi_char16_t ami[] = L"American Megatrends";
+
+ efi_get_seed(seed, sizeof(seed));
+
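+		/*
+		 * seed[1] randomizes the virtual address here; seed[0] is
+		 * used below for the physical load address.
+		 */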
+ virt_addr += (range * seed[1]) >> 32;
+ virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
+
+ /*
+ * Older Dell systems with AMI UEFI firmware v2.0 may hang
+ * while decompressing the kernel if physical address
+ * randomization is enabled.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218173
+ */
+ if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
+ !memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
+ efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
+ seed[0] = 0;
+ }
+ }
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
+ seed[0], EFI_LOADER_CODE,
+ EFI_X86_KERNEL_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ entry = decompress_kernel((void *)addr, virt_addr, error);
+ if (entry == ULONG_MAX) {
+ efi_free(alloc_size, addr);
+ return EFI_LOAD_ERROR;
+ }
+
+ *kernel_entry = addr + entry;
+
+ efi_adjust_memory_range_protection(addr, kernel_total_size);
+
+ return EFI_SUCCESS;
+}
+
+static void __noreturn enter_kernel(unsigned long kernel_addr,
+ struct boot_params *boot_params)
+{
+ /* enter decompressed kernel with boot_params pointer in RSI/ESI */
+ asm("jmp *%0"::"r"(kernel_addr), "S"(boot_params));
+
+ unreachable();
+}
+
+/*
+ * On success, this routine will jump to the relocated image directly and never
+ * return. On failure, it will exit to the firmware via efi_exit() instead of
+ * returning.
+ */
+void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+{
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ struct setup_header *hdr = &boot_params->hdr;
+ const struct linux_efi_initrd *initrd = NULL;
+ unsigned long kernel_entry;
+ efi_status_t status;
+
+ boot_params_pointer = boot_params;
+
+ efi_system_table = sys_table_arg;
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ if (have_unsupported_snp_features())
+ efi_exit(handle, EFI_UNSUPPORTED);
+
+ if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
+ efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+ if (efi_dxe_table &&
+ efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
+ efi_warn("Ignoring DXE services table: invalid signature\n");
+ efi_dxe_table = NULL;
+ }
+ }
+
+ /* grab the memory attributes protocol if it exists */
+ efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+
+ status = efi_setup_5level_paging();
+ if (status != EFI_SUCCESS) {
+ efi_err("efi_setup_5level_paging() failed!\n");
+ goto fail;
+ }
+
+#ifdef CONFIG_CMDLINE_BOOL
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+#endif
+ if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ status = efi_parse_options((char *)cmdline_paddr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+ }
+
+ status = efi_decompress_kernel(&kernel_entry);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to decompress kernel\n");
+ goto fail;
+ }
+
+ /*
+ * At this point, an initrd may already have been loaded by the
+ * bootloader and passed via bootparams. We permit an initrd loaded
+ * from the LINUX_EFI_INITRD_MEDIA_GUID device path to supersede it.
+ *
+ * If the device path is not present, any command-line initrd=
+ * arguments will be processed only if image is not NULL, which will be
+ * the case only if we were loaded via the PE entry point.
+ */
+ status = efi_load_initrd(image, hdr->initrd_addr_max, ULONG_MAX,
+ &initrd);
+ if (status != EFI_SUCCESS)
+ goto fail;
+ if (initrd && initrd->size > 0) {
+ efi_set_u64_split(initrd->base, &hdr->ramdisk_image,
+ &boot_params->ext_ramdisk_image);
+ efi_set_u64_split(initrd->size, &hdr->ramdisk_size,
+ &boot_params->ext_ramdisk_size);
+ }
+
+ /*
+ * If the boot loader gave us a value for secure_boot then we use that,
+ * otherwise we ask the BIOS.
+ */
+ if (boot_params->secure_boot == efi_secureboot_mode_unset)
+ boot_params->secure_boot = efi_get_secureboot();
+
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation();
+
+ efi_random_get_seed();
+
+ efi_retrieve_tpm2_eventlog();
+
+ setup_graphics(boot_params);
+
+ setup_efi_pci(boot_params);
+
+ setup_quirks(boot_params);
+
+ setup_unaccepted_memory();
+
+ status = exit_boot(boot_params, handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("exit_boot() failed!\n");
+ goto fail;
+ }
+
+ /*
+ * Call the SEV init code while still running with the firmware's
+ * GDT/IDT, so #VC exceptions will be handled by EFI.
+ */
+ sev_enable(boot_params);
+
+ efi_5level_switch();
+
+ enter_kernel(kernel_entry, boot_params);
+fail:
+ efi_err("efi_stub_entry() failed!\n");
+
+ efi_exit(handle, status);
+}
+
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+{
+ extern char _bss[], _ebss[];
+
+ memset(_bss, 0, _ebss - _bss);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+}
+
+#ifndef CONFIG_EFI_MIXED
+extern __alias(efi_handover_entry)
+void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+extern __alias(efi_handover_entry)
+void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+#endif
+#endif
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
new file mode 100644
index 0000000000..2748bca192
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/efi.h>
+
+extern struct boot_params *boot_params_pointer asm("boot_params");
+
+extern void trampoline_32bit_src(void *, bool);
+extern const u16 trampoline_ljmp_imm_offset;
+
+void efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size);
+
+#ifdef CONFIG_X86_64
+efi_status_t efi_setup_5level_paging(void);
+void efi_5level_switch(void);
+#else
+static inline efi_status_t efi_setup_5level_paging(void) { return EFI_SUCCESS; }
+static inline void efi_5level_switch(void) {}
+#endif
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
new file mode 100644
index 0000000000..fb676ded47
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pe.h>
+
+#ifdef CONFIG_64BIT
+ .set .Lextra_characteristics, 0x0
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+#else
+ .set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+#endif
+
+ .section ".head", "a"
+ .globl __efistub_efi_zboot_header
+__efistub_efi_zboot_header:
+.Ldoshdr:
+ .long MZ_MAGIC
+ .ascii "zimg" // image type
+ .long __efistub__gzdata_start - .Ldoshdr // payload offset
+ .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
+ .long 0, 0 // reserved
+ .asciz COMP_TYPE // compression type
+ .org .Ldoshdr + 0x38
+ .long LINUX_PE_MAGIC
+ .long .Lpehdr - .Ldoshdr // PE header offset
+
+.Lpehdr:
+ .long PE_MAGIC
+ .short MACHINE_TYPE
+ .short .Lsection_count
+ .long 0
+ .long 0
+ .long 0
+ .short .Lsection_table - .Loptional_header
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED |\
+ .Lextra_characteristics
+
+.Loptional_header:
+ .short .Lpe_opt_magic
+ .byte 0, 0
+ .long _etext - .Lefi_header_end
+ .long __data_size
+ .long 0
+ .long __efistub_efi_zboot_entry - .Ldoshdr
+ .long .Lefi_header_end - .Ldoshdr
+
+#ifdef CONFIG_64BIT
+ .quad 0
+#else
+ .long _etext - .Ldoshdr, 0x0
+#endif
+ .long 4096
+ .long 512
+ .short 0, 0
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
+ .short 0, 0
+ .long 0
+ .long _end - .Ldoshdr
+
+ .long .Lefi_header_end - .Ldoshdr
+ .long 0
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION
+ .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+#ifdef CONFIG_64BIT
+ .quad 0, 0, 0, 0
+#else
+ .long 0, 0, 0, 0
+#endif
+ .long 0
+ .long (.Lsection_table - .) / 8
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+#if defined(PE_DLL_CHAR_EX) || defined(CONFIG_DEBUG_EFI)
+ .long .Lefi_debug_table - .Ldoshdr // DebugTable
+ .long .Lefi_debug_table_size
+
+ .section ".rodata", "a"
+ .p2align 2
+.Lefi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY[]
+#ifdef PE_DLL_CHAR_EX
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS // Type
+ .long 4 // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_dll_characteristics_ex - .Ldoshdr // FileOffset
+#endif
+#ifdef CONFIG_DEBUG_EFI
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long .Lefi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_debug_entry - .Ldoshdr // FileOffset
+#endif
+ .set .Lefi_debug_table_size, . - .Lefi_debug_table
+ .previous
+#endif
+
+.Lsection_table:
+ .ascii ".text\0\0\0"
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE
+
+ .ascii ".data\0\0\0"
+ .long __data_size
+ .long _etext - .Ldoshdr
+ .long __data_rawsize
+ .long _etext - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE
+
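+	// each entry in the section table (IMAGE_SECTION_HEADER) is 40 bytes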
+ .set .Lsection_count, (. - .Lsection_table) / 40
+
+#ifdef PE_DLL_CHAR_EX
+.Lefi_dll_characteristics_ex:
+ .long PE_DLL_CHAR_EX
+#endif
+#ifdef CONFIG_DEBUG_EFI
+.Lefi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz ZBOOT_EFI_PATH
+
+ .set .Lefi_debug_entry_size, . - .Lefi_debug_entry
+#endif
+
+ .p2align 12
+.Lefi_header_end:
+
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
new file mode 100644
index 0000000000..bdb17eac0c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/pe.h>
+#include <asm/efi.h>
+#include <asm/unaligned.h>
+
+#include "efistub.h"
+
+static unsigned char zboot_heap[SZ_256K] __aligned(64);
+static unsigned long free_mem_ptr, free_mem_end_ptr;
+
+#define STATIC static
+#if defined(CONFIG_KERNEL_GZIP)
+#include "../../../../lib/decompress_inflate.c"
+#elif defined(CONFIG_KERNEL_LZ4)
+#include "../../../../lib/decompress_unlz4.c"
+#elif defined(CONFIG_KERNEL_LZMA)
+#include "../../../../lib/decompress_unlzma.c"
+#elif defined(CONFIG_KERNEL_LZO)
+#include "../../../../lib/decompress_unlzo.c"
+#elif defined(CONFIG_KERNEL_XZ)
+#undef memcpy
+#define memcpy memcpy
+#undef memmove
+#define memmove memmove
+#include "../../../../lib/decompress_unxz.c"
+#elif defined(CONFIG_KERNEL_ZSTD)
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
+extern char efi_zboot_header[];
+extern char _gzdata_start[], _gzdata_end[];
+
+static void error(char *x)
+{
+ efi_err("EFI decompressor: %s\n", x);
+}
+
+static unsigned long alloc_preferred_address(unsigned long alloc_size)
+{
+#ifdef EFI_KIMG_PREFERRED_ADDRESS
+ efi_physical_addr_t efi_addr = EFI_KIMG_PREFERRED_ADDRESS;
+
+ if (efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
+ alloc_size / EFI_PAGE_SIZE, &efi_addr) == EFI_SUCCESS)
+ return efi_addr;
+#endif
+ return ULONG_MAX;
+}
+
+void __weak efi_cache_sync_image(unsigned long image_base,
+ unsigned long alloc_size)
+{
+ // Provided by the arch to perform the cache maintenance necessary for
+ // executable code loaded into memory to be safe for execution.
+}
+
+struct screen_info *alloc_screen_info(void)
+{
+ return __alloc_screen_info();
+}
+
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
+{
+ unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ unsigned long image_base, alloc_size;
+ efi_loaded_image_t *image;
+ efi_status_t status;
+ char *cmdline_ptr;
+ int ret;
+
+ WRITE_ONCE(efi_system_table, systab);
+
+ free_mem_ptr = (unsigned long)&zboot_heap;
+ free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
+
+ status = efi_bs_call(handle_protocol, handle,
+ &LOADED_IMAGE_PROTOCOL_GUID, (void **)&image);
+ if (status != EFI_SUCCESS) {
+ error("Failed to locate parent's loaded image protocol");
+ return status;
+ }
+
+ status = efi_handle_cmdline(image, &cmdline_ptr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ efi_info("Decompressing Linux Kernel...\n");
+
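+	// The last 4 bytes of the compressed payload hold the decompressed
+	// image size (cf. ZBOOT_SIZE_LEN in zboot-header.S).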
+ // SizeOfImage from the compressee's PE/COFF header
+ alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4),
+ EFI_ALLOC_ALIGN);
+
+ // If the architecture has a preferred address for the image,
+ // try that first.
+ image_base = alloc_preferred_address(alloc_size);
+ if (image_base == ULONG_MAX) {
+ unsigned long min_kimg_align = efi_get_kimg_min_align();
+ u32 seed = U32_MAX;
+
+ if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ // Setting the random seed to 0x0 is the same as
+ // allocating as low as possible
+ seed = 0;
+ } else if (efi_nokaslr) {
+ efi_info("KASLR disabled on kernel command line\n");
+ } else {
+ status = efi_get_random_bytes(sizeof(seed), (u8 *)&seed);
+ if (status == EFI_NOT_FOUND) {
+ efi_info("EFI_RNG_PROTOCOL unavailable\n");
+ efi_nokaslr = true;
+ } else if (status != EFI_SUCCESS) {
+ efi_err("efi_get_random_bytes() failed (0x%lx)\n",
+ status);
+ efi_nokaslr = true;
+ }
+ }
+
+ status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
+ seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory\n");
+ goto free_cmdline;
+ }
+ }
+
+ // Decompress the payload into the newly allocated buffer.
+ ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
+ (void *)image_base, alloc_size, NULL, error);
+ if (ret < 0) {
+ error("Decompression failed");
+ status = EFI_DEVICE_ERROR;
+ goto free_image;
+ }
+
+ efi_cache_sync_image(image_base, alloc_size);
+
+ status = efi_stub_common(handle, image, image_base, cmdline_ptr);
+
+free_image:
+ efi_free(alloc_size, image_base);
+free_cmdline:
+ efi_bs_call(free_pool, cmdline_ptr);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
new file mode 100644
index 0000000000..ac8c0ef851
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ENTRY(__efistub_efi_zboot_header);
+
+PROVIDE(zboot_code_size = ABSOLUTE(0));
+
+SECTIONS
+{
+ .head : ALIGN(4096) {
+ *(.head)
+ }
+
+ .text : {
+ *(.text* .init.text*)
+ }
+
+ .rodata : ALIGN(8) {
+ __efistub__gzdata_start = .;
+ *(.gzdata)
+ __efistub__gzdata_end = .;
+ *(.rodata* .init.rodata* .srodata*)
+
+ . = ALIGN(4);
+ __efistub_code_size = .;
+ LONG(zboot_code_size);
+
+ _etext = ALIGN(4096);
+ . = _etext;
+ }
+
+ .data : ALIGN(4096) {
+ *(.data* .init.data*)
+ _edata = ALIGN(512);
+ . = _edata;
+ }
+
+ .bss : {
+ *(.bss* .init.bss*)
+ _end = ALIGN(512);
+ . = _end;
+ }
+
+ /DISCARD/ : {
+ *(.modinfo .init.modinfo)
+ }
+}
+
+PROVIDE(__efistub__gzdata_size =
+ ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
+
+PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
+PROVIDE(__data_size = ABSOLUTE(_end - _etext));
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 0000000000..ab85bf8e16
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#define pr_fmt(fmt) "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+ efi_memory_attributes_table_t *tbl;
+
+ if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi_mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 2) {
+ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ tbl->version);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+ memblock_reserve(efi_mem_attr_table, tbl_size);
+ set_bit(EFI_MEM_ATTR, &efi.flags);
+
+unmap:
+ early_memunmap(tbl, sizeof(*tbl));
+ return 0;
+}
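+
+/*
+ * For orientation: the table is a small header (version, num_entries,
+ * desc_size, flags) followed immediately by num_entries descriptors of
+ * desc_size bytes each, so entry i lives at
+ *
+ *	(void *)tbl->entry + i * tbl->desc_size
+ *
+ * which is how efi_memattr_apply_permissions() below walks the table.
+ */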
+
+/*
+ * Copies the UEFI memory descriptor @in to @out and returns true if @in is
+ * covered entirely by a UEFI memory map entry with matching attributes. The
+ * virtual address of @out is set according to the matching entry that was
+ * found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+ u64 in_paddr = in->phys_addr;
+ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+ efi_memory_desc_t *md;
+
+ *out = *in;
+
+ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+ in->type != EFI_RUNTIME_SERVICES_DATA) {
+ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+ return false;
+ }
+
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+ /*
+ * Since arm64 may execute with page sizes of up to 64 KB, the
+ * UEFI spec mandates that RuntimeServices memory regions must
+ * be 64 KB aligned. We need to validate this here since we will
+ * not be able to tighten permissions on such regions without
+ * affecting adjacent regions.
+ */
+ pr_warn("Entry address region misaligned\n");
+ return false;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 md_paddr = md->phys_addr;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0 && md->phys_addr != 0) {
+ /* no virtual mapping has been installed by the stub */
+ break;
+ }
+
+ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+ continue;
+
+ /*
+ * This entry covers the start of @in, check whether
+ * it covers the end as well.
+ */
+ if (md_paddr + md_size < in_paddr + in_size) {
+ pr_warn("Entry covers multiple EFI memory map regions\n");
+ return false;
+ }
+
+ if (md->type != in->type) {
+ pr_warn("Entry type deviates from EFI memory map region type\n");
+ return false;
+ }
+
+ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+ return true;
+ }
+
+ pr_warn("No matching entry found in the EFI memory map\n");
+ return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn)
+{
+ efi_memory_attributes_table_t *tbl;
+ bool has_bti = false;
+ int i, ret;
+
+ if (tbl_size <= sizeof(*tbl))
+ return 0;
+
+ /*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+	 * Memory Attributes table. If it isn't available, this
+ * function should not be called.
+ */
+ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+ return 0;
+
+ tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi_mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 1 &&
+ (tbl->flags & EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD))
+ has_bti = true;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI Memory Attributes table:\n");
+
+ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+ efi_memory_desc_t md;
+ unsigned long size;
+ bool valid;
+ char buf[64];
+
+ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ &md);
+ size = md.num_pages << EFI_PAGE_SHIFT;
+ if (efi_enabled(EFI_DBG) || !valid)
+ pr_info("%s 0x%012llx-0x%012llx %s\n",
+ valid ? "" : "!", md.phys_addr,
+ md.phys_addr + size - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+ if (valid) {
+ ret = fn(mm, &md, has_bti);
+ if (ret)
+ pr_err("Error updating mappings, skipping subsequent md's\n");
+ }
+ }
+ memunmap(tbl);
+ return ret;
+}
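+
+/*
+ * A minimal sketch of a perm-setter callback as consumed by
+ * efi_memattr_apply_permissions() above; the set_permissions() name is
+ * purely illustrative, the real implementations are provided by the arch
+ * code (e.g. efi_set_mapping_permissions()):
+ *
+ *	static int set_permissions(struct mm_struct *mm,
+ *				   efi_memory_desc_t *md, bool has_bti)
+ *	{
+ *		bool ro = md->attribute & EFI_MEMORY_RO;
+ *		bool xp = md->attribute & EFI_MEMORY_XP;
+ *
+ *		// remap [md->virt_addr, md->virt_addr + size) in @mm with
+ *		// permissions derived from ro/xp/has_bti
+ *		return 0;
+ *	}
+ */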
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
new file mode 100644
index 0000000000..a1180461a4
--- /dev/null
+++ b/drivers/firmware/efi/memmap.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EFI memory map functions.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+#include <asm/early_ioremap.h>
+#include <asm/efi.h>
+
+#ifndef __efi_memmap_free
+#define __efi_memmap_free(phys, size, flags) do { } while (0)
+#endif
+
+/**
+ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
+ *
+ * This function takes care of figuring out which function to use to
+ * map the EFI memory map in efi.memmap based on how far into the boot
+ * we are.
+ *
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't set up. Once the kernel is fully booted we can fall back
+ * to the more robust memremap*() API.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init __efi_memmap_init(struct efi_memory_map_data *data)
+{
+ struct efi_memory_map map;
+ phys_addr_t phys_map;
+
+ phys_map = data->phys_map;
+
+ if (data->flags & EFI_MEMMAP_LATE)
+ map.map = memremap(phys_map, data->size, MEMREMAP_WB);
+ else
+ map.map = early_memremap(phys_map, data->size);
+
+ if (!map.map) {
+ pr_err("Could not map the memory map!\n");
+ return -ENOMEM;
+ }
+
+ if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
+ __efi_memmap_free(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map,
+ efi.memmap.flags);
+
+ map.phys_map = data->phys_map;
+ map.nr_map = data->size / data->desc_size;
+ map.map_end = map.map + data->size;
+
+ map.desc_version = data->desc_version;
+ map.desc_size = data->desc_size;
+ map.flags = data->flags;
+
+ set_bit(EFI_MEMMAP, &efi.flags);
+
+ efi.memmap = map;
+
+ return 0;
+}
+
+/**
+ * efi_memmap_init_early - Map the EFI memory map data structure
+ * @data: EFI memory map data
+ *
+ * Use early_memremap() to map the passed in EFI memory map and assign
+ * it to efi.memmap.
+ */
+int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+{
+ /* Cannot go backwards */
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
+
+ data->flags = 0;
+ return __efi_memmap_init(data);
+}
+
+void __init efi_memmap_unmap(void)
+{
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
+ unsigned long size;
+
+ size = efi.memmap.desc_size * efi.memmap.nr_map;
+ early_memunmap(efi.memmap.map, size);
+ } else {
+ memunmap(efi.memmap.map);
+ }
+
+ efi.memmap.map = NULL;
+ clear_bit(EFI_MEMMAP, &efi.flags);
+}
+
+/**
+ * efi_memmap_init_late - Map efi.memmap with memremap()
+ * @addr: Physical address of the new EFI memory map
+ * @size: Size in bytes of the new EFI memory map
+ *
+ * Set up a mapping of the EFI memory map using memremap(). This
+ * function should only be called once the vmalloc space has been
+ * set up and is therefore not suitable for calling during early EFI
+ * initialisation, e.g. in efi_init(). Additionally, it expects
+ * efi_memmap_init_early() to have already been called.
+ *
+ * The reason there are two EFI memmap initialisation routines
+ * (efi_memmap_init_early() and this late version) is that the
+ * early EFI memmap should be explicitly unmapped once EFI
+ * initialisation is complete, as the fixmap space used to map the EFI
+ * memmap (via early_memremap()) is a scarce resource.
+ *
+ * This late mapping is intended to persist for the duration of
+ * runtime so that things like efi_mem_desc_lookup() and
+ * efi_mem_attributes() always work.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+{
+ struct efi_memory_map_data data = {
+ .phys_map = addr,
+ .size = size,
+ .flags = EFI_MEMMAP_LATE,
+ };
+
+ /* Did we forget to unmap the early EFI memmap? */
+ WARN_ON(efi.memmap.map);
+
+ /* Were we already called? */
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
+
+ /*
+ * It makes no sense to allow callers to register different
+ * values for the following fields. Copy them out of the
+ * existing early EFI memmap.
+ */
+ data.desc_version = efi.memmap.desc_version;
+ data.desc_size = efi.memmap.desc_size;
+
+ return __efi_memmap_init(&data);
+}
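+
+/*
+ * Typical call ordering, sketched for reference (the arch code drives this,
+ * see e.g. riscv_enable_runtime_services()):
+ *
+ *	efi_memmap_init_early(&data);		// boot: early_memremap() mapping
+ *	...
+ *	efi_memmap_unmap();			// give the scarce fixmap slot back
+ *	efi_memmap_init_late(phys, size);	// permanent memremap() mapping
+ */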
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
new file mode 100644
index 0000000000..5ed0602c2f
--- /dev/null
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mokvar-table.c
+ *
+ * Copyright (c) 2020 Red Hat
+ * Author: Lenny Szubowicz <lszubowi@redhat.com>
+ *
+ * This module contains the kernel support for the Linux EFI Machine
+ * Owner Key (MOK) variable configuration table, which is identified by
+ * the LINUX_EFI_MOK_VARIABLE_TABLE_GUID.
+ *
+ * This EFI configuration table provides a more robust alternative to
+ * EFI volatile variables by which an EFI boot loader can pass the
+ * contents of the Machine Owner Key (MOK) certificate stores to the
+ * kernel during boot. If both the EFI MOK config table and corresponding
+ * EFI MOK variables are present, the table should be considered
+ * more authoritative.
+ *
+ * This module includes code that validates and maps the EFI MOK table,
+ * if its presence was detected very early in boot.
+ *
+ * Kernel interface routines are provided to walk through all the
+ * entries in the MOK config table or to search for a specific named
+ * entry.
+ *
+ * The contents of the individual named MOK config table entries are
+ * made available to user space via read-only sysfs binary files under:
+ *
+ * /sys/firmware/efi/mok-variables/
+ *
+ */
+#define pr_fmt(fmt) "mokvar: " fmt
+
+#include <linux/capability.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table is a packed
+ * sequence of struct efi_mokvar_table_entry, one for each named
+ * MOK variable. The sequence is terminated by an entry with a
+ * completely NULL name and 0 data size.
+ *
+ * efi_mokvar_table_size is set to the computed size of the
+ * MOK config table by efi_mokvar_table_init(). This will be
+ * non-zero if and only if the table is present and has been
+ * validated by efi_mokvar_table_init().
+ */
+static size_t efi_mokvar_table_size;
+
+/*
+ * efi_mokvar_table_va is the kernel virtual address at which the
+ * EFI MOK config table has been mapped by efi_mokvar_sysfs_init().
+ */
+static struct efi_mokvar_table_entry *efi_mokvar_table_va;
+
+/*
+ * Each /sys/firmware/efi/mok-variables/ sysfs file is represented by
+ * an instance of struct efi_mokvar_sysfs_attr on efi_mokvar_sysfs_list.
+ * bin_attr.private points to the associated EFI MOK config table entry.
+ *
+ * This list is created during boot and then remains unchanged.
+ * So no synchronization is currently required to walk the list.
+ */
+struct efi_mokvar_sysfs_attr {
+ struct bin_attribute bin_attr;
+ struct list_head node;
+};
+
+static LIST_HEAD(efi_mokvar_sysfs_list);
+static struct kobject *mokvar_kobj;
+
+/*
+ * efi_mokvar_table_init() - Early boot validation of EFI MOK config table
+ *
+ * If present, validate and compute the size of the EFI MOK variable
+ * configuration table. This table may be provided by an EFI boot loader
+ * as an alternative to ordinary EFI variables, due to platform-dependent
+ * limitations. The memory occupied by this table is marked as reserved.
+ *
+ * This routine must be called before efi_free_boot_services() in order
+ * to guarantee that it can mark the table as reserved.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ */
+void __init efi_mokvar_table_init(void)
+{
+ efi_memory_desc_t md;
+ void *va = NULL;
+ unsigned long cur_offset = 0;
+ unsigned long offset_limit;
+ unsigned long map_size = 0;
+ unsigned long map_size_needed = 0;
+ unsigned long size;
+ struct efi_mokvar_table_entry *mokvar_entry;
+ int err;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (efi.mokvar_table == EFI_INVALID_TABLE_ADDR)
+ return;
+ /*
+ * The EFI MOK config table must fit within a single EFI memory
+ * descriptor range.
+ */
+ err = efi_mem_desc_lookup(efi.mokvar_table, &md);
+ if (err) {
+ pr_warn("EFI MOKvar config table is not within the EFI memory map\n");
+ return;
+ }
+
+ offset_limit = efi_mem_desc_end(&md) - efi.mokvar_table;
+
+ /*
+ * Validate the MOK config table. Since there is no table header
+ * from which we could get the total size of the MOK config table,
+ * we compute the total size as we validate each variably sized
+ * entry, remapping as necessary.
+ */
+ err = -EINVAL;
+ while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
+ mokvar_entry = va + cur_offset;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
+ if (map_size_needed > map_size) {
+ if (va)
+ early_memunmap(va, map_size);
+ /*
+ * Map a little more than the fixed size entry
+ * header, anticipating some data. It's safe to
+			 * do so as long as we stay within the current memory
+ * descriptor.
+ */
+ map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
+ offset_limit);
+ va = early_memremap(efi.mokvar_table, map_size);
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
+ efi.mokvar_table, map_size);
+ return;
+ }
+ mokvar_entry = va + cur_offset;
+ }
+
+ /* Check for last sentinel entry */
+ if (mokvar_entry->name[0] == '\0') {
+ if (mokvar_entry->data_size != 0)
+ break;
+ err = 0;
+ break;
+ }
+
+ /* Sanity check that the name is null terminated */
+ size = strnlen(mokvar_entry->name,
+ sizeof(mokvar_entry->name));
+ if (size >= sizeof(mokvar_entry->name))
+ break;
+
+ /* Advance to the next entry */
+ cur_offset = map_size_needed + mokvar_entry->data_size;
+ }
+
+ if (va)
+ early_memunmap(va, map_size);
+ if (err) {
+ pr_err("EFI MOKvar config table is not valid\n");
+ return;
+ }
+
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
+ efi_mokvar_table_size = map_size_needed;
+}
+
+/*
+ * efi_mokvar_entry_next() - Get next entry in the EFI MOK config table
+ *
+ * mokvar_entry: Pointer to current EFI MOK config table entry
+ * or null. Null indicates get first entry.
+ * Passed by reference. This is updated to the
+ * same value as the return value.
+ *
+ * Returns: Pointer to next EFI MOK config table entry
+ * or null, if there are no more entries.
+ * Same value is returned in the mokvar_entry
+ * parameter.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ struct efi_mokvar_table_entry *mokvar_cur;
+ struct efi_mokvar_table_entry *mokvar_next;
+ size_t size_cur;
+
+ mokvar_cur = *mokvar_entry;
+ *mokvar_entry = NULL;
+
+ if (efi_mokvar_table_va == NULL)
+ return NULL;
+
+ if (mokvar_cur == NULL) {
+ mokvar_next = efi_mokvar_table_va;
+ } else {
+ if (mokvar_cur->name[0] == '\0')
+ return NULL;
+ size_cur = sizeof(*mokvar_cur) + mokvar_cur->data_size;
+ mokvar_next = (void *)mokvar_cur + size_cur;
+ }
+
+ if (mokvar_next->name[0] == '\0')
+ return NULL;
+
+ *mokvar_entry = mokvar_next;
+ return mokvar_next;
+}
+
+/*
+ * efi_mokvar_entry_find() - Find EFI MOK config entry by name
+ *
+ * name: Name of the entry to look for.
+ *
+ * Returns: Pointer to EFI MOK config table entry if found;
+ * null otherwise.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ if (!strncmp(name, mokvar_entry->name,
+ sizeof(mokvar_entry->name)))
+ return mokvar_entry;
+ }
+ return NULL;
+}
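+
+/*
+ * Usage sketch: a consumer can walk all entries with efi_mokvar_entry_next()
+ * or look one up by name; the entry name and the parse_certs() helper below
+ * are purely illustrative:
+ *
+ *	struct efi_mokvar_table_entry *ent;
+ *
+ *	ent = efi_mokvar_entry_find("MokListRT");
+ *	if (ent)
+ *		parse_certs(ent->data, ent->data_size);
+ */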
+
+/*
+ * efi_mokvar_sysfs_read() - sysfs binary file read routine
+ *
+ * Returns: Count of bytes read.
+ *
+ * Copy EFI MOK config table entry data for this mokvar sysfs binary file
+ * to the supplied buffer, starting at the specified offset into mokvar table
+ * entry data, for the specified count of bytes. The copy is limited by the
+ * amount of data in this mokvar config table entry.
+ */
+static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (off >= mokvar_entry->data_size)
+ return 0;
+ if (count > mokvar_entry->data_size - off)
+ count = mokvar_entry->data_size - off;
+
+ memcpy(buf, mokvar_entry->data + off, count);
+ return count;
+}
+
+/*
+ * efi_mokvar_sysfs_init() - Map EFI MOK config table and create sysfs
+ *
+ * Map the EFI MOK variable config table for run-time use by the kernel
+ * and create the sysfs entries in /sys/firmware/efi/mok-variables/
+ *
+ * This routine just returns if a valid EFI MOK variable config table
+ * was not found earlier during boot.
+ *
+ * This routine must be called during a "middle" initcall phase, i.e.
+ * after efi_mokvar_table_init() but before UEFI certs are loaded
+ * during late init.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_va: Start virtual address of the EFI MOK config table.
+ */
+static int __init efi_mokvar_sysfs_init(void)
+{
+ void *config_va;
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+ struct efi_mokvar_sysfs_attr *mokvar_sysfs = NULL;
+ int err = 0;
+
+ if (efi_mokvar_table_size == 0)
+ return -ENOENT;
+
+ config_va = memremap(efi.mokvar_table, efi_mokvar_table_size,
+ MEMREMAP_WB);
+ if (!config_va) {
+ pr_err("Failed to map EFI MOKvar config table\n");
+ return -ENOMEM;
+ }
+ efi_mokvar_table_va = config_va;
+
+ mokvar_kobj = kobject_create_and_add("mok-variables", efi_kobj);
+ if (!mokvar_kobj) {
+ pr_err("Failed to create EFI mok-variables sysfs entry\n");
+ return -ENOMEM;
+ }
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ mokvar_sysfs = kzalloc(sizeof(*mokvar_sysfs), GFP_KERNEL);
+ if (!mokvar_sysfs) {
+ err = -ENOMEM;
+ break;
+ }
+
+ sysfs_bin_attr_init(&mokvar_sysfs->bin_attr);
+ mokvar_sysfs->bin_attr.private = mokvar_entry;
+ mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
+ mokvar_sysfs->bin_attr.attr.mode = 0400;
+ mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
+ mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
+
+ err = sysfs_create_bin_file(mokvar_kobj,
+ &mokvar_sysfs->bin_attr);
+ if (err)
+ break;
+
+ list_add_tail(&mokvar_sysfs->node, &efi_mokvar_sysfs_list);
+ }
+
+ if (err) {
+ pr_err("Failed to create some EFI mok-variables sysfs entries\n");
+ kfree(mokvar_sysfs);
+ }
+ return err;
+}
+fs_initcall(efi_mokvar_sysfs_init);
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
new file mode 100644
index 0000000000..de1a9a1f9f
--- /dev/null
+++ b/drivers/firmware/efi/rci2-table.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Export Runtime Configuration Interface Table Version 2 (RCI2)
+ * to sysfs
+ *
+ * Copyright (C) 2019 Dell Inc
+ * by Narendra K <Narendra.K@dell.com>
+ *
+ * System firmware advertises the address of the RCI2 Table via
+ * an EFI Configuration Table entry. This code retrieves the RCI2
+ * table from the address and exports it to sysfs as a binary
+ * attribute 'rci2' under /sys/firmware/efi/tables directory.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/efi.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define RCI_SIGNATURE "_RC_"
+
+struct rci2_table_global_hdr {
+ u16 type;
+ u16 resvd0;
+ u16 hdr_len;
+ u8 rci2_sig[4];
+ u16 resvd1;
+ u32 resvd2;
+ u32 resvd3;
+ u8 major_rev;
+ u8 minor_rev;
+ u16 num_of_structs;
+ u32 rci2_len;
+ u16 rci2_chksum;
+} __packed;
+
+static u8 *rci2_base;
+static u32 rci2_table_len;
+unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ memcpy(buf, attr->private + pos, count);
+ return count;
+}
+
+static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0);
+
+static u16 checksum(void)
+{
+ u8 len_is_odd = rci2_table_len % 2;
+ u32 chksum_len = rci2_table_len;
+ u16 *base = (u16 *)rci2_base;
+ u8 buf[2] = {0};
+ u32 offset = 0;
+ u16 chksum = 0;
+
+ if (len_is_odd)
+ chksum_len -= 1;
+
+ while (offset < chksum_len) {
+ chksum += *base;
+ offset += 2;
+ base++;
+ }
+
+ if (len_is_odd) {
+ buf[0] = *(u8 *)base;
+ chksum += *(u16 *)(buf);
+ }
+
+ return chksum;
+}
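+
+/*
+ * The table stores a 16-bit checksum field (rci2_chksum) chosen so that
+ * summing every 16-bit word of the table, including the stored checksum
+ * itself, yields zero, i.e. roughly:
+ *
+ *	rci2_chksum == (u16)-(sum of all other 16-bit words)
+ *
+ * so efi_rci2_sysfs_init() below only needs to check checksum() == 0.
+ */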
+
+static int __init efi_rci2_sysfs_init(void)
+{
+ struct kobject *tables_kobj;
+ int ret = -ENOMEM;
+
+ if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ rci2_base = memremap(rci2_table_phys,
+ sizeof(struct rci2_table_global_hdr),
+ MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table init failed - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (strncmp(rci2_base +
+ offsetof(struct rci2_table_global_hdr, rci2_sig),
+ RCI_SIGNATURE, 4)) {
+ pr_debug("RCI2 table init failed - incorrect signature\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ rci2_table_len = *(u32 *)(rci2_base +
+ offsetof(struct rci2_table_global_hdr,
+ rci2_len));
+
+ memunmap(rci2_base);
+
+ if (!rci2_table_len) {
+ pr_debug("RCI2 table init failed - incorrect table length\n");
+ goto err;
+ }
+
+ rci2_base = memremap(rci2_table_phys, rci2_table_len, MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (checksum() != 0) {
+ pr_debug("RCI2 table - incorrect checksum\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ tables_kobj = kobject_create_and_add("tables", efi_kobj);
+ if (!tables_kobj) {
+ pr_debug("RCI2 table - tables_kobj creation failed\n");
+ goto err_unmap;
+ }
+
+ bin_attr_rci2.size = rci2_table_len;
+ bin_attr_rci2.private = rci2_base;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_rci2);
+ if (ret != 0) {
+ pr_debug("RCI2 table - rci2 sysfs bin file creation failed\n");
+ kobject_del(tables_kobj);
+ kobject_put(tables_kobj);
+ goto err_unmap;
+ }
+
+ return 0;
+
+ err_unmap:
+ memunmap(rci2_base);
+ err:
+ pr_debug("RCI2 table - sysfs initialization failed\n");
+ return ret;
+}
+late_initcall(efi_rci2_sysfs_init);
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
new file mode 100644
index 0000000000..ceae84c19d
--- /dev/null
+++ b/drivers/firmware/efi/reboot.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ * Copyright (c) 2014 Red Hat, Inc., Mark Salter <msalter@redhat.com>
+ */
+#include <linux/efi.h>
+#include <linux/reboot.h>
+
+static struct sys_off_handler *efi_sys_off_handler;
+
+int efi_reboot_quirk_mode = -1;
+
+void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
+{
+ const char *str[] = { "cold", "warm", "shutdown", "platform" };
+ int efi_mode, cap_reset_mode;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
+ return;
+
+ switch (reboot_mode) {
+ case REBOOT_WARM:
+ case REBOOT_SOFT:
+ efi_mode = EFI_RESET_WARM;
+ break;
+ default:
+ efi_mode = EFI_RESET_COLD;
+ break;
+ }
+
+ /*
+ * If a quirk forced an EFI reset mode, always use that.
+ */
+ if (efi_reboot_quirk_mode != -1)
+ efi_mode = efi_reboot_quirk_mode;
+
+ if (efi_capsule_pending(&cap_reset_mode)) {
+ if (efi_mode != cap_reset_mode)
+ printk(KERN_CRIT "efi: %s reset requested but pending "
+ "capsule update requires %s reset... Performing "
+ "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+ str[cap_reset_mode]);
+ efi_mode = cap_reset_mode;
+ }
+
+ efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
+}
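+
+/*
+ * Platform quirk code may force a specific reset mode before this runs by
+ * assigning efi_reboot_quirk_mode, e.g. (illustrative only):
+ *
+ *	efi_reboot_quirk_mode = EFI_RESET_WARM;
+ */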
+
+bool __weak efi_poweroff_required(void)
+{
+ return false;
+}
+
+static int efi_power_off(struct sys_off_data *data)
+{
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+
+ return NOTIFY_DONE;
+}
+
+static int __init efi_shutdown_init(void)
+{
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
+ return -ENODEV;
+
+ if (efi_poweroff_required()) {
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ efi_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 1,
+ efi_power_off, NULL);
+ if (IS_ERR(efi_sys_off_handler))
+ return PTR_ERR(efi_sys_off_handler);
+ }
+
+ return 0;
+}
+late_initcall(efi_shutdown_init);
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
new file mode 100644
index 0000000000..09525fb5c2
--- /dev/null
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ * Adapted from drivers/firmware/efi/arm-runtime.c
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == U64_MAX)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (ret) {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init riscv_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(riscv_enable_runtime_services);
+
+static void efi_virtmap_load(void)
+{
+ preempt_disable();
+ switch_mm(current->active_mm, &efi_mm, NULL);
+}
+
+static void efi_virtmap_unload(void)
+{
+ switch_mm(&efi_mm, current->active_mm, NULL);
+ preempt_enable();
+}
+
+void arch_efi_call_virt_setup(void)
+{
+ sync_kernel_mappings(efi_mm.pgd);
+ efi_virtmap_load();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+ efi_virtmap_unload();
+}
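+
+/*
+ * These two hooks bracket every runtime-services call issued by
+ * runtime-wrappers.c, roughly:
+ *
+ *	arch_efi_call_virt_setup();		// switch to efi_mm
+ *	status = arch_efi_call_virt(efi.runtime, get_time, tm, tc);
+ *	arch_efi_call_virt_teardown();		// switch back, re-enable preemption
+ */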
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
new file mode 100644
index 0000000000..5d56bc40a7
--- /dev/null
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * runtime-wrappers.c - Runtime Services function call wrappers
+ *
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests to execute efi_runtime_service(),
+ * enqueue work to efi_rts_wq.
+ * 2. The caller thread waits until the work is finished because it
+ *    depends on the return status and execution of
+ * efi_runtime_service().
+ * For instance, get_variable() and get_next_variable().
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Split off from arch/x86/platform/efi/efi.c
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
+ * Copyright (C) 2005-2008 Intel Co.
+ * Copyright (C) 2013 SuSE Labs
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/bug.h>
+#include <linux/efi.h>
+#include <linux/irqflags.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include <asm/efi.h>
+
+/*
+ * Wrap the arch-provided arch_efi_call_virt() macro so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...) \
+ arch_efi_call_virt(efi.runtime, f, args)
+
+union efi_rts_args {
+ struct {
+ efi_time_t *time;
+ efi_time_cap_t *capabilities;
+ } GET_TIME;
+
+ struct {
+ efi_time_t *time;
+ } SET_TIME;
+
+ struct {
+ efi_bool_t *enabled;
+ efi_bool_t *pending;
+ efi_time_t *time;
+ } GET_WAKEUP_TIME;
+
+ struct {
+ efi_bool_t enable;
+ efi_time_t *time;
+ } SET_WAKEUP_TIME;
+
+ struct {
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ u32 *attr;
+ unsigned long *data_size;
+ void *data;
+ } GET_VARIABLE;
+
+ struct {
+ unsigned long *name_size;
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ } GET_NEXT_VARIABLE;
+
+ struct {
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ u32 attr;
+ unsigned long data_size;
+ void *data;
+ } SET_VARIABLE;
+
+ struct {
+ u32 attr;
+ u64 *storage_space;
+ u64 *remaining_space;
+ u64 *max_variable_size;
+ } QUERY_VARIABLE_INFO;
+
+ struct {
+ u32 *high_count;
+ } GET_NEXT_HIGH_MONO_COUNT;
+
+ struct {
+ efi_capsule_header_t **capsules;
+ unsigned long count;
+ unsigned long sg_list;
+ } UPDATE_CAPSULE;
+
+ struct {
+ efi_capsule_header_t **capsules;
+ unsigned long count;
+ u64 *max_size;
+ int *reset_type;
+ } QUERY_CAPSULE_CAPS;
+
+ struct {
+ efi_status_t (__efiapi *acpi_prm_handler)(u64, void *);
+ u64 param_buffer_addr;
+ void *context;
+ } ACPI_PRM_HANDLER;
+};
+
+struct efi_runtime_work efi_rts_work;
+
+/*
+ * efi_queue_work: Queue EFI runtime service call and wait for completion
+ * @_rts: EFI runtime service function identifier
+ * @_args: Arguments to pass to the EFI runtime service
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock), so _only_ one work item is queued at a
+ * time and the caller thread waits for its completion.
+ */
+#define efi_queue_work(_rts, _args...) \
+ __efi_queue_work(EFI_ ## _rts, \
+ &(union efi_rts_args){ ._rts = { _args }})
+
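+/*
+ * For example, efi_queue_work(GET_TIME, tm, tc) expands to
+ *
+ *	__efi_queue_work(EFI_GET_TIME,
+ *			 &(union efi_rts_args){ .GET_TIME = { tm, tc }});
+ *
+ * i.e. the designated initializer selects the union member matching the
+ * requested service.
+ */
+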
+#ifndef arch_efi_save_flags
+#define arch_efi_save_flags(state_flags) local_save_flags(state_flags)
+#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags)
+#endif
+
+unsigned long efi_call_virt_save_flags(void)
+{
+ unsigned long flags;
+
+ arch_efi_save_flags(flags);
+ return flags;
+}
+
+void efi_call_virt_check_flags(unsigned long flags, const void *caller)
+{
+ unsigned long cur_flags, mismatch;
+
+ cur_flags = efi_call_virt_save_flags();
+
+ mismatch = flags ^ cur_flags;
+ if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+ return;
+
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+ pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI call from %pS\n",
+ flags, cur_flags, caller ?: __builtin_return_address(0));
+ arch_efi_restore_flags(flags);
+}
+
+/*
+ * According to section 7.1 of the UEFI spec, Runtime Services are not fully
+ * reentrant, and there are particular combinations of calls that need to be
+ * serialized. (source: UEFI Specification v2.4A)
+ *
+ * Table 31. Rules for Reentry Into Runtime Services
+ * +------------------------------------+-------------------------------+
+ * | If previous call is busy in | Forbidden to call |
+ * +------------------------------------+-------------------------------+
+ * | Any | SetVirtualAddressMap() |
+ * +------------------------------------+-------------------------------+
+ * | ConvertPointer() | ConvertPointer() |
+ * +------------------------------------+-------------------------------+
+ * | SetVariable() | ResetSystem() |
+ * | UpdateCapsule() | |
+ * | SetTime() | |
+ * | SetWakeupTime() | |
+ * | GetNextHighMonotonicCount() | |
+ * +------------------------------------+-------------------------------+
+ * | GetVariable() | GetVariable() |
+ * | GetNextVariableName() | GetNextVariableName() |
+ * | SetVariable() | SetVariable() |
+ * | QueryVariableInfo() | QueryVariableInfo() |
+ * | UpdateCapsule() | UpdateCapsule() |
+ * | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() |
+ * | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() |
+ * +------------------------------------+-------------------------------+
+ * | GetTime() | GetTime() |
+ * | SetTime() | SetTime() |
+ * | GetWakeupTime() | GetWakeupTime() |
+ * | SetWakeupTime() | SetWakeupTime() |
+ * +------------------------------------+-------------------------------+
+ *
+ * Due to the fact that the EFI pstore may write to the variable store in
+ * interrupt context, we need to use a lock for at least the groups that
+ * contain SetVariable() and QueryVariableInfo(). That leaves little else, as
+ * none of the remaining functions are actually ever called at runtime.
+ * So let's just use a single lock to serialize all Runtime Services calls.
+ */
+static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
+
+/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ const union efi_rts_args *args = efi_rts_work.args;
+ efi_status_t status = EFI_NOT_FOUND;
+ unsigned long flags;
+
+ arch_efi_call_virt_setup();
+ flags = efi_call_virt_save_flags();
+
+ switch (efi_rts_work.efi_rts_id) {
+ case EFI_GET_TIME:
+ status = efi_call_virt(get_time,
+ args->GET_TIME.time,
+ args->GET_TIME.capabilities);
+ break;
+ case EFI_SET_TIME:
+ status = efi_call_virt(set_time,
+ args->SET_TIME.time);
+ break;
+ case EFI_GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time,
+ args->GET_WAKEUP_TIME.enabled,
+ args->GET_WAKEUP_TIME.pending,
+ args->GET_WAKEUP_TIME.time);
+ break;
+ case EFI_SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time,
+ args->SET_WAKEUP_TIME.enable,
+ args->SET_WAKEUP_TIME.time);
+ break;
+ case EFI_GET_VARIABLE:
+ status = efi_call_virt(get_variable,
+ args->GET_VARIABLE.name,
+ args->GET_VARIABLE.vendor,
+ args->GET_VARIABLE.attr,
+ args->GET_VARIABLE.data_size,
+ args->GET_VARIABLE.data);
+ break;
+ case EFI_GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable,
+ args->GET_NEXT_VARIABLE.name_size,
+ args->GET_NEXT_VARIABLE.name,
+ args->GET_NEXT_VARIABLE.vendor);
+ break;
+ case EFI_SET_VARIABLE:
+ status = efi_call_virt(set_variable,
+ args->SET_VARIABLE.name,
+ args->SET_VARIABLE.vendor,
+ args->SET_VARIABLE.attr,
+ args->SET_VARIABLE.data_size,
+ args->SET_VARIABLE.data);
+ break;
+ case EFI_QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info,
+ args->QUERY_VARIABLE_INFO.attr,
+ args->QUERY_VARIABLE_INFO.storage_space,
+ args->QUERY_VARIABLE_INFO.remaining_space,
+ args->QUERY_VARIABLE_INFO.max_variable_size);
+ break;
+ case EFI_GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count,
+ args->GET_NEXT_HIGH_MONO_COUNT.high_count);
+ break;
+ case EFI_UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ args->UPDATE_CAPSULE.capsules,
+ args->UPDATE_CAPSULE.count,
+ args->UPDATE_CAPSULE.sg_list);
+ break;
+ case EFI_QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ args->QUERY_CAPSULE_CAPS.capsules,
+ args->QUERY_CAPSULE_CAPS.count,
+ args->QUERY_CAPSULE_CAPS.max_size,
+ args->QUERY_CAPSULE_CAPS.reset_type);
+ break;
+ case EFI_ACPI_PRM_HANDLER:
+#ifdef CONFIG_ACPI_PRMT
+ status = arch_efi_call_virt(args, ACPI_PRM_HANDLER.acpi_prm_handler,
+ args->ACPI_PRM_HANDLER.param_buffer_addr,
+ args->ACPI_PRM_HANDLER.context);
+ break;
+#endif
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+ * function identifier into efi_rts_work->efi_rts_id
+ */
+ pr_err("Requested executing invalid EFI Runtime Service.\n");
+ }
+
+ efi_call_virt_check_flags(flags, efi_rts_work.caller);
+ arch_efi_call_virt_teardown();
+
+ efi_rts_work.status = status;
+ complete(&efi_rts_work.efi_rts_comp);
+}
+
+static efi_status_t __efi_queue_work(enum efi_rts_ids id,
+ union efi_rts_args *args)
+{
+ efi_rts_work.efi_rts_id = id;
+ efi_rts_work.args = args;
+ efi_rts_work.caller = __builtin_return_address(0);
+ efi_rts_work.status = EFI_ABORTED;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_warn_once("EFI Runtime Services are disabled!\n");
+ efi_rts_work.status = EFI_DEVICE_ERROR;
+ goto exit;
+ }
+
+ init_completion(&efi_rts_work.efi_rts_comp);
+ INIT_WORK(&efi_rts_work.work, efi_call_rts);
+
+ /*
+	 * queue_work() returns false if the work was already on the queue;
+	 * _ideally_ this should never happen.
+ */
+ if (queue_work(efi_rts_wq, &efi_rts_work.work))
+ wait_for_completion(&efi_rts_work.efi_rts_comp);
+ else
+ pr_err("Failed to queue work to efi_rts_wq.\n");
+
+ WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED);
+exit:
+ efi_rts_work.efi_rts_id = EFI_NONE;
+ return efi_rts_work.status;
+}
+
+static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(GET_TIME, tm, tc);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_time(efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(SET_TIME, tm);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
+ efi_bool_t *pending,
+ efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(SET_WAKEUP_TIME, enabled, tm);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 *attr,
+ unsigned long *data_size,
+ void *data)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 attr,
+ unsigned long data_size,
+ void *data)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(SET_VARIABLE, name, vendor, attr, data_size,
+ data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t
+virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
+ unsigned long data_size, void *data)
+{
+ efi_status_t status;
+
+ if (down_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor,
+ attr, data_size, data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+
+static efi_status_t virt_efi_query_variable_info(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(QUERY_VARIABLE_INFO, attr, storage_space,
+ remaining_space, max_variable_size);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t
+virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
+ u64 *remaining_space, u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr,
+ storage_space, remaining_space,
+ max_variable_size);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static void virt_efi_reset_system(int reset_type,
+ efi_status_t status,
+ unsigned long data_size,
+ efi_char16_t *data)
+{
+ if (down_trylock(&efi_runtime_lock)) {
+ pr_warn("failed to invoke the reset_system() runtime service:\n"
+ "could not get exclusive access to the firmware\n");
+ return;
+ }
+
+ arch_efi_call_virt_setup();
+ efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
+ arch_efi_call_virt(efi.runtime, reset_system, reset_type, status,
+ data_size, data);
+ arch_efi_call_virt_teardown();
+
+ up(&efi_runtime_lock);
+}
+
+static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count,
+ unsigned long sg_list)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, count, sg_list);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, count,
+ max_size, reset_type);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+void __init efi_native_runtime_setup(void)
+{
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+ efi.set_wakeup_time = virt_efi_set_wakeup_time;
+ efi.get_variable = virt_efi_get_variable;
+ efi.get_next_variable = virt_efi_get_next_variable;
+ efi.set_variable = virt_efi_set_variable;
+ efi.set_variable_nonblocking = virt_efi_set_variable_nb;
+ efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+ efi.reset_system = virt_efi_reset_system;
+ efi.query_variable_info = virt_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nb;
+ efi.update_capsule = virt_efi_update_capsule;
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
+}
+
+#ifdef CONFIG_ACPI_PRMT
+
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(ACPI_PRM_HANDLER, handler_addr,
+ param_buffer_addr, context);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+#endif
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
new file mode 100644
index 0000000000..456d0e5eaf
--- /dev/null
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic System Framebuffers
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com>
+ */
+
+/*
+ * EFI Quirks
+ * Several EFI systems do not correctly advertise their boot framebuffers.
+ * Hence, we use this static table of known broken machines and fix up the
+ * information so framebuffer drivers can load correctly.
+ */
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+#include <linux/sysfb.h>
+#include <video/vga.h>
+
+enum {
+ OVERRIDE_NONE = 0x0,
+ OVERRIDE_BASE = 0x1,
+ OVERRIDE_STRIDE = 0x2,
+ OVERRIDE_HEIGHT = 0x4,
+ OVERRIDE_WIDTH = 0x8,
+};
+
+struct efifb_dmi_info efifb_dmi_list[] = {
+ [M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */
+ [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE },
+ [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE },
+ [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ /* 11" Macbook Air 3,1 passes the wrong stride */
+ [M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE },
+ [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
+};
+
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+ int i;
+
+ for (i = 0; i < M_UNKNOWN; i++) {
+ if (efifb_dmi_list[i].base != 0 &&
+ !strcmp(opt, efifb_dmi_list[i].optname)) {
+ si->lfb_base = efifb_dmi_list[i].base;
+ si->lfb_linelength = efifb_dmi_list[i].stride;
+ si->lfb_width = efifb_dmi_list[i].width;
+ si->lfb_height = efifb_dmi_list[i].height;
+ }
+ }
+}
+
+#define choose_value(dmivalue, fwvalue, field, flags) ({ \
+ typeof(fwvalue) _ret_ = fwvalue; \
+ if ((flags) & (field)) \
+ _ret_ = dmivalue; \
+ else if ((fwvalue) == 0) \
+ _ret_ = dmivalue; \
+ _ret_; \
+ })
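+
+/*
+ * For example, choose_value(info->base, screen_info.lfb_base, OVERRIDE_BASE,
+ * info->flags) evaluates to info->base when the quirk entry sets
+ * OVERRIDE_BASE or when the firmware reported a zero base, and to the
+ * firmware-provided value otherwise.
+ */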
+
+static int __init efifb_set_system(const struct dmi_system_id *id)
+{
+ struct efifb_dmi_info *info = id->driver_data;
+
+ if (info->base == 0 && info->height == 0 && info->width == 0 &&
+ info->stride == 0)
+ return 0;
+
+ /* Trust the bootloader over the DMI tables */
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
+ if (info->base) {
+ screen_info.lfb_base = choose_value(info->base,
+ screen_info.lfb_base, OVERRIDE_BASE,
+ info->flags);
+
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually
+ * on a VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+ unsigned long flags;
+
+ flags = pci_resource_flags(dev, i);
+ if (!(flags & IORESOURCE_MEM))
+ continue;
+
+ if (flags & IORESOURCE_UNSET)
+ continue;
+
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ start = pci_resource_start(dev, i);
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ break;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ }
+ if (screen_info.lfb_base) {
+ screen_info.lfb_linelength = choose_value(info->stride,
+ screen_info.lfb_linelength, OVERRIDE_STRIDE,
+ info->flags);
+ screen_info.lfb_width = choose_value(info->width,
+ screen_info.lfb_width, OVERRIDE_WIDTH,
+ info->flags);
+ screen_info.lfb_height = choose_value(info->height,
+ screen_info.lfb_height, OVERRIDE_HEIGHT,
+ info->flags);
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+
+ printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
+ "(%dx%d, stride %d)\n", id->ident,
+ screen_info.lfb_base, screen_info.lfb_width,
+ screen_info.lfb_height, screen_info.lfb_linelength);
+
+ return 1;
+}
+
+#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \
+ { \
+ efifb_set_system, \
+ name, \
+ { \
+ DMI_MATCH(DMI_BIOS_VENDOR, vendor), \
+ DMI_MATCH(DMI_PRODUCT_NAME, name) \
+ }, \
+ &efifb_dmi_list[enumid] \
+ }
+
+static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2),
+ {},
+};
+
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ {
+ /*
+ * Lenovo MIIX310-10ICR, only some batches have the troublesome
+ * 800x1280 portrait screen. Luckily the portrait version has
+ * its own BIOS version, so we match on that.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+ },
+ },
+ {
+ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo MIIX 320-10ICR"),
+ },
+ },
+ {
+ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo ideapad D330-10IGM"),
+ },
+ },
+ {
+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "IdeaPad Duet 3 10IGL5"),
+ },
+ },
+ {
+ /* Lenovo Yoga Book X91F / X91L */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		/* Non-exact match, to match both the F and L versions */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ },
+ },
+ {},
+};
+
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+ u64 fb_base = screen_info.lfb_base;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+ return fb_base >= range->cpu_addr &&
+ fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ err = of_pci_range_parser_init(&parser, np);
+ if (err) {
+ pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+ continue;
+ }
+
+ for_each_of_pci_range(&parser, &range)
+ if (efifb_overlaps_pci_range(&range))
+ return np;
+ }
+ return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(struct fwnode_handle *fwnode)
+{
+ struct device_node *sup_np;
+
+ sup_np = find_pci_overlap_node();
+
+ /*
+ * If there's no PCI graphics controller backing the efifb, we are
+ * done here.
+ */
+ if (!sup_np)
+ return 0;
+
+ fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
+ of_node_put(sup_np);
+
+ return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+ .add_links = efifb_add_links,
+};
+
+#ifdef CONFIG_EFI
+static struct fwnode_handle efifb_fwnode;
+
+__init void sysfb_apply_efi_quirks(void)
+{
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+ dmi_check_system(efifb_dmi_system_table);
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ dmi_check_system(efifb_dmi_swap_width_height)) {
+ u16 temp = screen_info.lfb_width;
+
+ screen_info.lfb_width = screen_info.lfb_height;
+ screen_info.lfb_height = temp;
+ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ }
+}
+
+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
+ fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
+ pd->dev.fwnode = &efifb_fwnode;
+ }
+}
+#endif
diff --git a/drivers/firmware/efi/test/Makefile b/drivers/firmware/efi/test/Makefile
new file mode 100644
index 0000000000..4197088550
--- /dev/null
+++ b/drivers/firmware/efi/test/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_EFI_TEST) += efi_test.o
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
new file mode 100644
index 0000000000..47d67bb0a5
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EFI Test Driver for Runtime Services
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ * This driver exports EFI runtime services interfaces into userspace, which
+ * allows userspace to use and test the UEFI runtime services provided by
+ * the firmware.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "efi_test.h"
+
+MODULE_AUTHOR("Ivan Hu <ivan.hu@canonical.com>");
+MODULE_DESCRIPTION("EFI Test Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Note this function returns the number of *bytes*, not the number of
+ * ucs2 characters.
+ */
+static inline size_t user_ucs2_strsize(efi_char16_t __user *str)
+{
+ efi_char16_t *s = str, c;
+ size_t len;
+
+ if (!str)
+ return 0;
+
+ /* Include terminating NULL */
+ len = sizeof(efi_char16_t);
+
+ if (get_user(c, s++)) {
+ /* Can't read userspace memory for size */
+ return 0;
+ }
+
+ while (c != 0) {
+ if (get_user(c, s++)) {
+ /* Can't read userspace memory for size */
+ return 0;
+ }
+ len += sizeof(efi_char16_t);
+ }
+ return len;
+}
+
+/*
+ * Allocate a buffer and copy a ucs2 string from user space into it.
+ */
+static inline int
+copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
+ size_t len)
+{
+ efi_char16_t *buf;
+
+ if (!src) {
+ *dst = NULL;
+ return 0;
+ }
+
+ buf = memdup_user(src, len);
+ if (IS_ERR(buf)) {
+ *dst = NULL;
+ return PTR_ERR(buf);
+ }
+ *dst = buf;
+
+ return 0;
+}
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Just a wrapper around user_ucs2_strsize().
+ */
+static inline int
+get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
+{
+ *len = user_ucs2_strsize(src);
+ if (*len == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Calculate the required buffer allocation size and copy a ucs2 string
+ * from user space into it.
+ *
+ * This function differs from copy_ucs2_from_user_len() because it
+ * calculates the size of the buffer to allocate by taking the length of
+ * the string 'src'.
+ *
+ * If a non-zero value is returned, the caller MUST NOT access 'dst'.
+ *
+ * It is the caller's responsibility to free 'dst'.
+ */
+static inline int
+copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
+{
+ size_t len;
+
+ len = user_ucs2_strsize(src);
+ if (len == 0)
+ return -EFAULT;
+ return copy_ucs2_from_user_len(dst, src, len);
+}
+
+/*
+ * Copy a ucs2 string to a user buffer.
+ *
+ * This function is a simple wrapper around copy_to_user() that does
+ * nothing if 'src' is NULL, which is useful for reducing the amount of
+ * NULL checking the caller has to do.
+ *
+ * 'len' specifies the number of bytes to copy.
+ */
+static inline int
+copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
+{
+ if (!src)
+ return 0;
+
+ return copy_to_user(dst, src, len);
+}
+
+static long efi_runtime_get_variable(unsigned long arg)
+{
+ struct efi_getvariable __user *getvariable_user;
+ struct efi_getvariable getvariable;
+ unsigned long datasize = 0, prev_datasize, *dz;
+ efi_guid_t vendor_guid, *vd = NULL;
+ efi_status_t status;
+ efi_char16_t *name = NULL;
+ u32 attr, *at;
+ void *data = NULL;
+ int rv = 0;
+
+ getvariable_user = (struct efi_getvariable __user *)arg;
+
+ if (copy_from_user(&getvariable, getvariable_user,
+ sizeof(getvariable)))
+ return -EFAULT;
+ if (getvariable.data_size &&
+ get_user(datasize, getvariable.data_size))
+ return -EFAULT;
+ if (getvariable.vendor_guid) {
+ if (copy_from_user(&vendor_guid, getvariable.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+ vd = &vendor_guid;
+ }
+
+ if (getvariable.variable_name) {
+ rv = copy_ucs2_from_user(&name, getvariable.variable_name);
+ if (rv)
+ return rv;
+ }
+
+ at = getvariable.attributes ? &attr : NULL;
+ dz = getvariable.data_size ? &datasize : NULL;
+
+ if (getvariable.data_size && getvariable.data) {
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (!data) {
+ kfree(name);
+ return -ENOMEM;
+ }
+ }
+
+ prev_datasize = datasize;
+ status = efi.get_variable(name, vd, at, dz, data);
+ kfree(name);
+
+ if (put_user(status, getvariable.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ if (dz && put_user(datasize, getvariable.data_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (prev_datasize < datasize) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (data) {
+ if (copy_to_user(getvariable.data, data, datasize)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (at && put_user(attr, getvariable.attributes)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (dz && put_user(datasize, getvariable.data_size))
+ rv = -EFAULT;
+
+out:
+ kfree(data);
+ return rv;
+
+}
+
+static long efi_runtime_set_variable(unsigned long arg)
+{
+ struct efi_setvariable __user *setvariable_user;
+ struct efi_setvariable setvariable;
+ efi_guid_t vendor_guid;
+ efi_status_t status;
+ efi_char16_t *name = NULL;
+ void *data;
+ int rv = 0;
+
+ setvariable_user = (struct efi_setvariable __user *)arg;
+
+ if (copy_from_user(&setvariable, setvariable_user, sizeof(setvariable)))
+ return -EFAULT;
+ if (copy_from_user(&vendor_guid, setvariable.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+
+ if (setvariable.variable_name) {
+ rv = copy_ucs2_from_user(&name, setvariable.variable_name);
+ if (rv)
+ return rv;
+ }
+
+ data = memdup_user(setvariable.data, setvariable.data_size);
+ if (IS_ERR(data)) {
+ kfree(name);
+ return PTR_ERR(data);
+ }
+
+ status = efi.set_variable(name, &vendor_guid,
+ setvariable.attributes,
+ setvariable.data_size, data);
+
+ if (put_user(status, setvariable.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ rv = status == EFI_SUCCESS ? 0 : -EINVAL;
+
+out:
+ kfree(data);
+ kfree(name);
+
+ return rv;
+}
+
+static long efi_runtime_get_time(unsigned long arg)
+{
+ struct efi_gettime __user *gettime_user;
+ struct efi_gettime gettime;
+ efi_status_t status;
+ efi_time_cap_t cap;
+ efi_time_t efi_time;
+
+ gettime_user = (struct efi_gettime __user *)arg;
+ if (copy_from_user(&gettime, gettime_user, sizeof(gettime)))
+ return -EFAULT;
+
+ status = efi.get_time(gettime.time ? &efi_time : NULL,
+ gettime.capabilities ? &cap : NULL);
+
+ if (put_user(status, gettime.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (gettime.capabilities) {
+ efi_time_cap_t __user *cap_local;
+
+ cap_local = (efi_time_cap_t *)gettime.capabilities;
+ if (put_user(cap.resolution, &(cap_local->resolution)) ||
+ put_user(cap.accuracy, &(cap_local->accuracy)) ||
+ put_user(cap.sets_to_zero, &(cap_local->sets_to_zero)))
+ return -EFAULT;
+ }
+ if (gettime.time) {
+ if (copy_to_user(gettime.time, &efi_time, sizeof(efi_time_t)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long efi_runtime_set_time(unsigned long arg)
+{
+ struct efi_settime __user *settime_user;
+ struct efi_settime settime;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ settime_user = (struct efi_settime __user *)arg;
+ if (copy_from_user(&settime, settime_user, sizeof(settime)))
+ return -EFAULT;
+ if (copy_from_user(&efi_time, settime.time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+ status = efi.set_time(&efi_time);
+
+ if (put_user(status, settime.status))
+ return -EFAULT;
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_waketime(unsigned long arg)
+{
+ struct efi_getwakeuptime __user *getwakeuptime_user;
+ struct efi_getwakeuptime getwakeuptime;
+ efi_bool_t enabled, pending;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ getwakeuptime_user = (struct efi_getwakeuptime __user *)arg;
+ if (copy_from_user(&getwakeuptime, getwakeuptime_user,
+ sizeof(getwakeuptime)))
+ return -EFAULT;
+
+ status = efi.get_wakeup_time(
+ getwakeuptime.enabled ? (efi_bool_t *)&enabled : NULL,
+ getwakeuptime.pending ? (efi_bool_t *)&pending : NULL,
+ getwakeuptime.time ? &efi_time : NULL);
+
+ if (put_user(status, getwakeuptime.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (getwakeuptime.enabled && put_user(enabled,
+ getwakeuptime.enabled))
+ return -EFAULT;
+
+ if (getwakeuptime.time) {
+ if (copy_to_user(getwakeuptime.time, &efi_time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long efi_runtime_set_waketime(unsigned long arg)
+{
+ struct efi_setwakeuptime __user *setwakeuptime_user;
+ struct efi_setwakeuptime setwakeuptime;
+ efi_bool_t enabled;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ setwakeuptime_user = (struct efi_setwakeuptime __user *)arg;
+
+ if (copy_from_user(&setwakeuptime, setwakeuptime_user,
+ sizeof(setwakeuptime)))
+ return -EFAULT;
+
+ enabled = setwakeuptime.enabled;
+ if (setwakeuptime.time) {
+ if (copy_from_user(&efi_time, setwakeuptime.time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+
+ status = efi.set_wakeup_time(enabled, &efi_time);
+ } else
+ status = efi.set_wakeup_time(enabled, NULL);
+
+ if (put_user(status, setwakeuptime.status))
+ return -EFAULT;
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_nextvariablename(unsigned long arg)
+{
+ struct efi_getnextvariablename __user *getnextvariablename_user;
+ struct efi_getnextvariablename getnextvariablename;
+ unsigned long name_size, prev_name_size = 0, *ns = NULL;
+ efi_status_t status;
+ efi_guid_t *vd = NULL;
+ efi_guid_t vendor_guid;
+ efi_char16_t *name = NULL;
+ int rv = 0;
+
+ getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
+
+ if (copy_from_user(&getnextvariablename, getnextvariablename_user,
+ sizeof(getnextvariablename)))
+ return -EFAULT;
+
+ if (getnextvariablename.variable_name_size) {
+ if (get_user(name_size, getnextvariablename.variable_name_size))
+ return -EFAULT;
+ ns = &name_size;
+ prev_name_size = name_size;
+ }
+
+ if (getnextvariablename.vendor_guid) {
+ if (copy_from_user(&vendor_guid,
+ getnextvariablename.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+ vd = &vendor_guid;
+ }
+
+ if (getnextvariablename.variable_name) {
+ size_t name_string_size = 0;
+
+ rv = get_ucs2_strsize_from_user(
+ getnextvariablename.variable_name,
+ &name_string_size);
+ if (rv)
+ return rv;
+		/*
+		 * In some use cases, name_size may be smaller than the real
+		 * size of the buffer holding the variable name. The most
+		 * typical case is passing 0 on the first call to query the
+		 * required buffer size. So copy at least the full string size
+		 * of the variable name from user space, or else the name
+		 * passed to UEFI may not be terminated as expected.
+		 */
+ rv = copy_ucs2_from_user_len(&name,
+ getnextvariablename.variable_name,
+ prev_name_size > name_string_size ?
+ prev_name_size : name_string_size);
+ if (rv)
+ return rv;
+ }
+
+ status = efi.get_next_variable(ns, name, vd);
+
+ if (put_user(status, getnextvariablename.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ if (ns && put_user(*ns,
+ getnextvariablename.variable_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (name) {
+ if (copy_ucs2_to_user_len(getnextvariablename.variable_name,
+ name, prev_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ns) {
+ if (put_user(*ns, getnextvariablename.variable_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (vd) {
+ if (copy_to_user(getnextvariablename.vendor_guid, vd,
+ sizeof(efi_guid_t)))
+ rv = -EFAULT;
+ }
+
+out:
+ kfree(name);
+ return rv;
+}
+
+static long efi_runtime_get_nexthighmonocount(unsigned long arg)
+{
+ struct efi_getnexthighmonotoniccount __user *getnexthighmonocount_user;
+ struct efi_getnexthighmonotoniccount getnexthighmonocount;
+ efi_status_t status;
+ u32 count;
+
+ getnexthighmonocount_user = (struct
+ efi_getnexthighmonotoniccount __user *)arg;
+
+ if (copy_from_user(&getnexthighmonocount,
+ getnexthighmonocount_user,
+ sizeof(getnexthighmonocount)))
+ return -EFAULT;
+
+ status = efi.get_next_high_mono_count(
+ getnexthighmonocount.high_count ? &count : NULL);
+
+ if (put_user(status, getnexthighmonocount.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (getnexthighmonocount.high_count &&
+ put_user(count, getnexthighmonocount.high_count))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long efi_runtime_reset_system(unsigned long arg)
+{
+ struct efi_resetsystem __user *resetsystem_user;
+ struct efi_resetsystem resetsystem;
+ void *data = NULL;
+
+ resetsystem_user = (struct efi_resetsystem __user *)arg;
+ if (copy_from_user(&resetsystem, resetsystem_user,
+ sizeof(resetsystem)))
+ return -EFAULT;
+ if (resetsystem.data_size != 0) {
+ data = memdup_user((void *)resetsystem.data,
+ resetsystem.data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ efi.reset_system(resetsystem.reset_type, resetsystem.status,
+ resetsystem.data_size, (efi_char16_t *)data);
+
+ kfree(data);
+ return 0;
+}
+
+static long efi_runtime_query_variableinfo(unsigned long arg)
+{
+ struct efi_queryvariableinfo __user *queryvariableinfo_user;
+ struct efi_queryvariableinfo queryvariableinfo;
+ efi_status_t status;
+ u64 max_storage, remaining, max_size;
+
+ queryvariableinfo_user = (struct efi_queryvariableinfo __user *)arg;
+
+ if (copy_from_user(&queryvariableinfo, queryvariableinfo_user,
+ sizeof(queryvariableinfo)))
+ return -EFAULT;
+
+ status = efi.query_variable_info(queryvariableinfo.attributes,
+ &max_storage, &remaining, &max_size);
+
+ if (put_user(status, queryvariableinfo.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (put_user(max_storage,
+ queryvariableinfo.maximum_variable_storage_size))
+ return -EFAULT;
+
+ if (put_user(remaining,
+ queryvariableinfo.remaining_variable_storage_size))
+ return -EFAULT;
+
+ if (put_user(max_size, queryvariableinfo.maximum_variable_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long efi_runtime_query_capsulecaps(unsigned long arg)
+{
+ struct efi_querycapsulecapabilities __user *qcaps_user;
+ struct efi_querycapsulecapabilities qcaps;
+ efi_capsule_header_t *capsules;
+ efi_status_t status;
+ u64 max_size;
+ int i, reset_type;
+ int rv = 0;
+
+ qcaps_user = (struct efi_querycapsulecapabilities __user *)arg;
+
+ if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
+ return -EFAULT;
+
+ if (qcaps.capsule_count == ULONG_MAX)
+ return -EINVAL;
+
+ capsules = kcalloc(qcaps.capsule_count + 1,
+ sizeof(efi_capsule_header_t), GFP_KERNEL);
+ if (!capsules)
+ return -ENOMEM;
+
+ for (i = 0; i < qcaps.capsule_count; i++) {
+ efi_capsule_header_t *c;
+		/*
+		 * We cannot dereference qcaps.capsule_header_array directly to
+		 * obtain the address of the capsule, as it resides in user
+		 * space.
+		 */
+ if (get_user(c, qcaps.capsule_header_array + i)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ if (copy_from_user(&capsules[i], c,
+ sizeof(efi_capsule_header_t))) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ qcaps.capsule_header_array = &capsules;
+
+ status = efi.query_capsule_caps((efi_capsule_header_t **)
+ qcaps.capsule_header_array,
+ qcaps.capsule_count,
+ &max_size, &reset_type);
+
+ if (put_user(status, qcaps.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (put_user(max_size, qcaps.maximum_capsule_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (put_user(reset_type, qcaps.reset_type))
+ rv = -EFAULT;
+
+out:
+ kfree(capsules);
+ return rv;
+}
+
+static long efi_runtime_get_supported_mask(unsigned long arg)
+{
+ unsigned int __user *supported_mask;
+ int rv = 0;
+
+ supported_mask = (unsigned int *)arg;
+
+ if (put_user(efi.runtime_supported_mask, supported_mask))
+ rv = -EFAULT;
+
+ return rv;
+}
+
+static long efi_test_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case EFI_RUNTIME_GET_VARIABLE:
+ return efi_runtime_get_variable(arg);
+
+ case EFI_RUNTIME_SET_VARIABLE:
+ return efi_runtime_set_variable(arg);
+
+ case EFI_RUNTIME_GET_TIME:
+ return efi_runtime_get_time(arg);
+
+ case EFI_RUNTIME_SET_TIME:
+ return efi_runtime_set_time(arg);
+
+ case EFI_RUNTIME_GET_WAKETIME:
+ return efi_runtime_get_waketime(arg);
+
+ case EFI_RUNTIME_SET_WAKETIME:
+ return efi_runtime_set_waketime(arg);
+
+ case EFI_RUNTIME_GET_NEXTVARIABLENAME:
+ return efi_runtime_get_nextvariablename(arg);
+
+ case EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT:
+ return efi_runtime_get_nexthighmonocount(arg);
+
+ case EFI_RUNTIME_QUERY_VARIABLEINFO:
+ return efi_runtime_query_variableinfo(arg);
+
+ case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES:
+ return efi_runtime_query_capsulecaps(arg);
+
+ case EFI_RUNTIME_RESET_SYSTEM:
+ return efi_runtime_reset_system(arg);
+
+ case EFI_RUNTIME_GET_SUPPORTED_MASK:
+ return efi_runtime_get_supported_mask(arg);
+ }
+
+ return -ENOTTY;
+}
+
+static int efi_test_open(struct inode *inode, struct file *file)
+{
+ int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+ if (ret)
+ return ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+	/*
+	 * Nothing special to do here. We accept multiple open files at the
+	 * same time because we synchronize on a per-call basis.
+	 */
+ return 0;
+}
+
+static int efi_test_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/*
+ * The various file operations we support.
+ */
+static const struct file_operations efi_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = efi_test_ioctl,
+ .open = efi_test_open,
+ .release = efi_test_close,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_test_dev = {
+ MISC_DYNAMIC_MINOR,
+ "efi_test",
+ &efi_test_fops
+};
+
+static int __init efi_test_init(void)
+{
+ int ret;
+
+ ret = misc_register(&efi_test_dev);
+ if (ret) {
+ pr_err("efi_test: can't misc_register on minor=%d\n",
+ MISC_DYNAMIC_MINOR);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit efi_test_exit(void)
+{
+ misc_deregister(&efi_test_dev);
+}
+
+module_init(efi_test_init);
+module_exit(efi_test_exit);
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
new file mode 100644
index 0000000000..117349e579
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * EFI Test driver Header
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_TEST_H_
+#define _DRIVERS_FIRMWARE_EFI_TEST_H_
+
+#include <linux/efi.h>
+
+struct efi_getvariable {
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ u32 *attributes;
+ unsigned long *data_size;
+ void *data;
+ efi_status_t *status;
+} __packed;
+
+struct efi_setvariable {
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ u32 attributes;
+ unsigned long data_size;
+ void *data;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getnextvariablename {
+ unsigned long *variable_name_size;
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ efi_status_t *status;
+} __packed;
+
+struct efi_queryvariableinfo {
+ u32 attributes;
+ u64 *maximum_variable_storage_size;
+ u64 *remaining_variable_storage_size;
+ u64 *maximum_variable_size;
+ efi_status_t *status;
+} __packed;
+
+struct efi_gettime {
+ efi_time_t *time;
+ efi_time_cap_t *capabilities;
+ efi_status_t *status;
+} __packed;
+
+struct efi_settime {
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getwakeuptime {
+ efi_bool_t *enabled;
+ efi_bool_t *pending;
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_setwakeuptime {
+ efi_bool_t enabled;
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getnexthighmonotoniccount {
+ u32 *high_count;
+ efi_status_t *status;
+} __packed;
+
+struct efi_querycapsulecapabilities {
+ efi_capsule_header_t **capsule_header_array;
+ unsigned long capsule_count;
+ u64 *maximum_capsule_size;
+ int *reset_type;
+ efi_status_t *status;
+} __packed;
+
+struct efi_resetsystem {
+ int reset_type;
+ efi_status_t status;
+ unsigned long data_size;
+ efi_char16_t *data;
+} __packed;
+
+#define EFI_RUNTIME_GET_VARIABLE \
+ _IOWR('p', 0x01, struct efi_getvariable)
+#define EFI_RUNTIME_SET_VARIABLE \
+ _IOW('p', 0x02, struct efi_setvariable)
+
+#define EFI_RUNTIME_GET_TIME \
+ _IOR('p', 0x03, struct efi_gettime)
+#define EFI_RUNTIME_SET_TIME \
+ _IOW('p', 0x04, struct efi_settime)
+
+#define EFI_RUNTIME_GET_WAKETIME \
+ _IOR('p', 0x05, struct efi_getwakeuptime)
+#define EFI_RUNTIME_SET_WAKETIME \
+ _IOW('p', 0x06, struct efi_setwakeuptime)
+
+#define EFI_RUNTIME_GET_NEXTVARIABLENAME \
+ _IOWR('p', 0x07, struct efi_getnextvariablename)
+
+#define EFI_RUNTIME_QUERY_VARIABLEINFO \
+ _IOR('p', 0x08, struct efi_queryvariableinfo)
+
+#define EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT \
+ _IOR('p', 0x09, struct efi_getnexthighmonotoniccount)
+
+#define EFI_RUNTIME_QUERY_CAPSULECAPABILITIES \
+ _IOR('p', 0x0A, struct efi_querycapsulecapabilities)
+
+#define EFI_RUNTIME_RESET_SYSTEM \
+ _IOW('p', 0x0B, struct efi_resetsystem)
+
+#define EFI_RUNTIME_GET_SUPPORTED_MASK \
+ _IOR('p', 0x0C, unsigned int)
+
+#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
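The ioctl numbers above form the entire /dev/efi_test ABI. As a rough illustration (not part of the driver), a userspace caller could query the runtime-services support mask as sketched below; the ioctl number is re-derived locally so the sketch stays self-contained, and error handling is kept minimal.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Mirrors EFI_RUNTIME_GET_SUPPORTED_MASK above, kept local so this builds alone. */
	#define EFI_TEST_GET_SUPPORTED_MASK	_IOR('p', 0x0C, unsigned int)

	int main(void)
	{
		unsigned int mask = 0;
		/* Opening the device requires CAP_SYS_ADMIN and no EFI lockdown. */
		int fd = open("/dev/efi_test", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, EFI_TEST_GET_SUPPORTED_MASK, &mask) == 0)
			printf("efi.runtime_supported_mask = 0x%x\n", mask);
		close(fd);
		return 0;
	}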
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 0000000000..e8d69bd548
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+
+#define TPM_MEMREMAP(start, size) early_memremap(start, size)
+#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
+
+#include <asm/early_ioremap.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/tpm_eventlog.h>
+
+int efi_tpm_final_log_size;
+EXPORT_SYMBOL(efi_tpm_final_log_size);
+
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
+{
+ struct tcg_pcr_event2_head *header;
+ int event_size, size = 0;
+
+ while (count > 0) {
+ header = data + size;
+ event_size = __calc_tpm2_event_size(header, size_info, true);
+ if (event_size == 0)
+ return -1;
+ size += event_size;
+ count--;
+ }
+
+ return size;
+}
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct efi_tcg2_final_events_table *final_tbl;
+ int tbl_size;
+ int ret = 0;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+ /*
+ * We can't calculate the size of the final events without the
+ * first entry in the TPM log, so bail here.
+ */
+ return 0;
+ }
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+ pr_info("TPM Final Events table not present\n");
+ goto out;
+ } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table invalid\n");
+ goto out;
+ }
+
+ final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+ if (!final_tbl) {
+ pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
+ efi.tpm_final_log);
+ efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tbl_size = 0;
+ if (final_tbl->nr_events != 0) {
+ void *events = (void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events);
+
+ tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
+ }
+
+ if (tbl_size < 0) {
+ pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ ret = -EINVAL;
+ goto out_calc;
+ }
+
+ memblock_reserve(efi.tpm_final_log,
+ tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
+out_calc:
+ early_memunmap(final_tbl, sizeof(*final_tbl));
+out:
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return ret;
+}
+
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
new file mode 100644
index 0000000000..79fb687bb9
--- /dev/null
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/spinlock.h>
+#include <asm/unaccepted_memory.h>
+
+/* Protects unaccepted memory bitmap and accepting_list */
+static DEFINE_SPINLOCK(unaccepted_memory_lock);
+
+struct accept_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
+/*
+ * accept_memory() -- Consult bitmap and accept the memory if needed.
+ *
+ * Only memory that is explicitly marked as unaccepted in the bitmap requires
+ * an action. All the remaining memory is implicitly accepted and doesn't need
+ * acceptance.
+ *
+ * No need to accept:
+ * - anything if the system has no unaccepted table;
+ * - memory that is below phys_base;
+ *  - memory that is above the memory addressable by the bitmap.
+ */
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ struct efi_unaccepted_memory *unaccepted;
+ unsigned long range_start, range_end;
+ struct accept_range range, *entry;
+ unsigned long flags;
+ u64 unit_size;
+
+ unaccepted = efi_get_unaccepted_table();
+ if (!unaccepted)
+ return;
+
+ unit_size = unaccepted->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted->phys_base)
+ start = unaccepted->phys_base;
+ if (end < unaccepted->phys_base)
+ return;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted->phys_base;
+ end -= unaccepted->phys_base;
+
+ /*
+ * load_unaligned_zeropad() can lead to unwanted loads across page
+ * boundaries. The unwanted loads are typically harmless. But, they
+ * might be made to totally unrelated or even unmapped memory.
+ * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
+ * #VE) to recover from these unwanted loads.
+ *
+ * But, this approach does not work for unaccepted memory. For TDX, a
+ * load from unaccepted memory will not lead to a recoverable exception
+ * within the guest. The guest will exit to the VMM where the only
+ * recourse is to terminate the guest.
+ *
+ * There are two parts to fix this issue and comprehensively avoid
+ * access to unaccepted memory. Together these ensure that an extra
+ * "guard" page is accepted in addition to the memory that needs to be
+ * used:
+ *
+ * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
+ * checks up to end+unit_size if 'end' is aligned on a unit_size
+ * boundary.
+ *
+ * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
+ * 'end' is aligned on a unit_size boundary. (immediately following
+ * this comment)
+ */
+ if (!(end % unit_size))
+ end += unit_size;
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted->size * unit_size * BITS_PER_BYTE;
+
+ range.start = start / unit_size;
+ range.end = DIV_ROUND_UP(end, unit_size);
+retry:
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+ /*
+	 * Check if anybody is already working on accepting the same range of memory.
+ *
+ * The check is done with unit_size granularity. It is crucial to catch
+ * all accept requests to the same unit_size block, even if they don't
+ * overlap on physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+ if (entry->end <= range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+
+ /*
+		 * Somebody else is accepting the range, or at least part of it.
+ *
+ * Drop the lock and retry until it is complete.
+ */
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ goto retry;
+ }
+
+ /*
+ * Register that the range is about to be accepted.
+ * Make sure nobody else will accept it.
+ */
+ list_add(&range.list, &accepting_list);
+
+ range_start = range.start;
+ for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
+ range.end) {
+ unsigned long phys_start, phys_end;
+ unsigned long len = range_end - range_start;
+
+ phys_start = range_start * unit_size + unaccepted->phys_base;
+ phys_end = range_end * unit_size + unaccepted->phys_base;
+
+ /*
+ * Keep interrupts disabled until the accept operation is
+ * complete in order to prevent deadlocks.
+ *
+ * Enabling interrupts before calling arch_accept_memory()
+ * creates an opportunity for an interrupt handler to request
+ * acceptance for the same memory. The handler will continuously
+		 * spin with interrupts disabled, preventing the other task from
+ * making progress with the acceptance process.
+ */
+ spin_unlock(&unaccepted_memory_lock);
+
+ arch_accept_memory(phys_start, phys_end);
+
+ spin_lock(&unaccepted_memory_lock);
+ bitmap_clear(unaccepted->bitmap, range_start, len);
+ }
+
+ list_del(&range.list);
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
+{
+ struct efi_unaccepted_memory *unaccepted;
+ unsigned long flags;
+ bool ret = false;
+ u64 unit_size;
+
+ unaccepted = efi_get_unaccepted_table();
+ if (!unaccepted)
+ return false;
+
+ unit_size = unaccepted->unit_size;
+
+ /*
+ * Only care for the part of the range that is represented
+ * in the bitmap.
+ */
+ if (start < unaccepted->phys_base)
+ start = unaccepted->phys_base;
+ if (end < unaccepted->phys_base)
+ return false;
+
+ /* Translate to offsets from the beginning of the bitmap */
+ start -= unaccepted->phys_base;
+ end -= unaccepted->phys_base;
+
+ /*
+ * Also consider the unaccepted state of the *next* page. See fix #1 in
+ * the comment on load_unaligned_zeropad() in accept_memory().
+ */
+ if (!(end % unit_size))
+ end += unit_size;
+
+ /* Make sure not to overrun the bitmap */
+ if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
+ end = unaccepted->size * unit_size * BITS_PER_BYTE;
+
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ while (start < end) {
+ if (test_bit(start / unit_size, unaccepted->bitmap)) {
+ ret = true;
+ break;
+ }
+
+ start += unit_size;
+ }
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+
+ return ret;
+}
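Both helpers above translate a physical range into bitmap positions with the same clamp, extend and round-up steps. The following is a standalone sketch of that arithmetic only, using invented values for phys_base, unit_size and the range; it is an illustration, not kernel code.

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long long phys_base = 0x100000000ULL;	/* invented table base */
		unsigned long long unit_size = 2ULL << 20;	/* invented 2 MiB unit */
		unsigned long long start = 0x100200000ULL;	/* invented range to check */
		unsigned long long end = 0x100a00000ULL;

		/* Clamp to the covered region and convert to bitmap offsets. */
		if (start < phys_base)
			start = phys_base;
		start -= phys_base;
		end -= phys_base;

		/* Guard unit: extend by one unit when 'end' is unit-aligned. */
		if (!(end % unit_size))
			end += unit_size;

		/* One bit per unit_size bytes, with 'end' rounded up. */
		printf("bits %llu..%llu\n",
		       start / unit_size, DIV_ROUND_UP(end, unit_size) - 1);
		return 0;
	}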
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
new file mode 100644
index 0000000000..e9dc7116da
--- /dev/null
+++ b/drivers/firmware/efi/vars.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Originally from efivars.c
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ */
+
+#define pr_fmt(fmt) "efivars: " fmt
+
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+#include <linux/ucs2_string.h>
+
+/* Private pointer to registered efivars */
+static struct efivars *__efivars;
+
+static DEFINE_SEMAPHORE(efivars_lock, 1);
+
+static efi_status_t check_var_size(bool nonblocking, u32 attributes,
+ unsigned long size)
+{
+ const struct efivar_operations *fops;
+ efi_status_t status;
+
+ fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ status = EFI_UNSUPPORTED;
+ else
+ status = fops->query_variable_store(attributes, size,
+ nonblocking);
+ if (status == EFI_UNSUPPORTED)
+ return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES;
+ return status;
+}
+
+/**
+ * efivar_is_available - check if efivars is available
+ *
+ * @return true iff efivars is currently registered
+ */
+bool efivar_is_available(void)
+{
+ return __efivars != NULL;
+}
+EXPORT_SYMBOL_GPL(efivar_is_available);
+
+/**
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ *
+ * Only a single efivars can be registered at any time.
+ */
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops)
+{
+ int rv;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (__efivars) {
+ pr_warn("efivars already registered\n");
+ rv = -EBUSY;
+ goto out;
+ }
+
+ efivars->ops = ops;
+
+ __efivars = efivars;
+
+ pr_info("Registered efivars operations\n");
+ rv = 0;
+out:
+ up(&efivars_lock);
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efivars_register);
+
+/**
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
+ *
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
+ */
+int efivars_unregister(struct efivars *efivars)
+{
+ int rv;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ pr_err("efivars not registered\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (__efivars != efivars) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ pr_info("Unregistered efivars operations\n");
+ __efivars = NULL;
+
+ rv = 0;
+out:
+ up(&efivars_lock);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efivars_unregister);
+
+bool efivar_supports_writes(void)
+{
+ return __efivars && __efivars->ops->set_variable;
+}
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
+
+/*
+ * efivar_lock() - obtain the efivar lock, wait for it if needed
+ * @return 0 on success, error code on failure
+ */
+int efivar_lock(void)
+{
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+ if (!__efivars->ops) {
+ up(&efivars_lock);
+ return -ENODEV;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
+
+/*
+ * efivar_trylock() - obtain the efivar lock if it is free
+ * @return 0 on success, error code on failure
+ */
+int efivar_trylock(void)
+{
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+ if (!__efivars->ops) {
+ up(&efivars_lock);
+ return -ENODEV;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
+
+/*
+ * efivar_unlock() - release the efivar lock
+ */
+void efivar_unlock(void)
+{
+ up(&efivars_lock);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
+
+/*
+ * efivar_get_variable() - retrieve a variable identified by name/vendor
+ *
+ * Must be called with efivars_lock held.
+ */
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data)
+{
+ return __efivars->ops->get_variable(name, vendor, attr, size, data);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
+
+/*
+ * efivar_get_next_variable() - enumerate the next name/vendor pair
+ *
+ * Must be called with efivars_lock held.
+ */
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor)
+{
+ return __efivars->ops->get_next_variable(name_size, name, vendor);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
+
+/*
+ * efivar_set_variable_locked() - set a variable identified by name/vendor
+ *
+ * Must be called with efivars_lock held. If @nonblocking is set, it will use
+ * non-blocking primitives so it is guaranteed not to sleep.
+ */
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking)
+{
+ efi_set_variable_t *setvar;
+ efi_status_t status;
+
+ if (data_size > 0) {
+ status = check_var_size(nonblocking, attr,
+ data_size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ /*
+ * If no _nonblocking variant exists, the ordinary one
+ * is assumed to be non-blocking.
+ */
+ setvar = __efivars->ops->set_variable_nonblocking;
+ if (!setvar || !nonblocking)
+ setvar = __efivars->ops->set_variable;
+
+ return setvar(name, vendor, attr, data_size, data);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
+
+/*
+ * efivar_set_variable() - set a variable identified by name/vendor
+ *
+ * Can be called without holding the efivars_lock. Will sleep on obtaining the
+ * lock, or on obtaining other locks that are needed in order to complete the
+ * call.
+ */
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+{
+ efi_status_t status;
+
+ if (efivar_lock())
+ return EFI_ABORTED;
+
+ status = efivar_set_variable_locked(name, vendor, attr, data_size,
+ data, false);
+ efivar_unlock();
+ return status;
+}
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
+
+efi_status_t efivar_query_variable_info(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ if (!__efivars->ops->query_variable_info)
+ return EFI_UNSUPPORTED;
+ return __efivars->ops->query_variable_info(attr, storage_space,
+ remaining_space, max_variable_size);
+}
+EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR);
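For in-kernel users of the helpers exported above, the expected pattern is lock, access, unlock, and modules additionally need to import the EFIVAR symbol namespace. Below is a hedged sketch of one possible caller; the variable name, GUID and buffer are arbitrary examples and are not defined anywhere in this file.

	#include <linux/efi.h>
	#include <linux/module.h>

	MODULE_IMPORT_NS(EFIVAR);	/* the helpers are exported in the EFIVAR namespace */

	static efi_status_t example_read_timeout(void *buf, unsigned long *size)
	{
		/* "Timeout" spelled out as UCS-2; purely an example variable. */
		static efi_char16_t name[] = { 'T', 'i', 'm', 'e', 'o', 'u', 't', 0 };
		efi_guid_t vendor = EFI_GLOBAL_VARIABLE_GUID;
		efi_status_t status;

		if (efivar_lock())
			return EFI_ABORTED;
		status = efivar_get_variable(name, &vendor, NULL, size, buf);
		efivar_unlock();

		return status;
	}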
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
new file mode 100644
index 0000000000..41b78f5cb7
--- /dev/null
+++ b/drivers/firmware/google/Kconfig
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig GOOGLE_FIRMWARE
+ bool "Google Firmware Drivers"
+ default n
+ help
+ These firmware drivers are used by Google servers,
+ Chromebooks and other devices using coreboot firmware.
+ If in doubt, say "N".
+
+if GOOGLE_FIRMWARE
+
+config GOOGLE_SMI
+ tristate "SMI interface for Google platforms"
+ depends on X86 && ACPI && DMI
+ help
+ Say Y here if you want to enable SMI callbacks for Google
+ platforms. This provides an interface for writing to and
+ clearing the event log. If CONFIG_EFI is also enabled this
+ driver provides an interface for reading and writing NVRAM
+ variables.
+
+config GOOGLE_CBMEM
+ tristate "CBMEM entries in sysfs"
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ CBMEM is a downwards-growing memory region created by the
+ Coreboot BIOS containing tagged data structures from the
+ BIOS. These data structures expose things like the verified
+ boot firmware variables, flash layout, firmware event log,
+ and more.
+
+ This option enables the cbmem module, which causes the
+ kernel to search for Coreboot CBMEM entries, and expose the
+ memory for each entry in sysfs under
+ /sys/bus/coreboot/devices/cbmem-<id>.
+
+config GOOGLE_COREBOOT_TABLE
+ tristate "Coreboot Table Access"
+ depends on HAS_IOMEM && (ACPI || OF)
+ help
+ This option enables the coreboot_table module, which provides other
+ firmware modules access to the coreboot table. The coreboot table
+ pointer is accessed through the ACPI "GOOGCB00" object or the
+ device tree node /firmware/coreboot.
+ If unsure say N.
+
+config GOOGLE_MEMCONSOLE
+ tristate
+ depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
+
+config GOOGLE_MEMCONSOLE_X86_LEGACY
+ tristate "Firmware Memory Console - X86 Legacy support"
+ depends on X86 && ACPI && DMI
+ select GOOGLE_MEMCONSOLE
+ help
+ This option enables the kernel to search for a firmware log in
+ the EBDA on Google servers. If found, this log is exported to
+ userland in the file /sys/firmware/log.
+
+config GOOGLE_FRAMEBUFFER_COREBOOT
+ tristate "Coreboot Framebuffer"
+ depends on FB_SIMPLE || DRM_SIMPLEDRM
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the kernel to search for a framebuffer in
+ the coreboot table. If found, it is registered with simplefb.
+
+config GOOGLE_MEMCONSOLE_COREBOOT
+ tristate "Firmware Memory Console"
+ depends on GOOGLE_COREBOOT_TABLE
+ select GOOGLE_MEMCONSOLE
+ help
+ This option enables the kernel to search for a firmware log in
+ the coreboot table. If found, this log is exported to userland
+ in the file /sys/firmware/log.
+
+config GOOGLE_VPD
+ tristate "Vital Product Data"
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the kernel to expose the content of Google VPD
+ under /sys/firmware/vpd.
+
+endif # GOOGLE_FIRMWARE
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
new file mode 100644
index 0000000000..8151e323cc
--- /dev/null
+++ b/drivers/firmware/google/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
+obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o
+
+# Must come after coreboot_table.o, as this driver depends on that bus type.
+obj-$(CONFIG_GOOGLE_CBMEM) += cbmem.o
+
+vpd-sysfs-y := vpd.o vpd_decode.o
+obj-$(CONFIG_GOOGLE_VPD) += vpd-sysfs.o
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
new file mode 100644
index 0000000000..88e587ba1e
--- /dev/null
+++ b/drivers/firmware/google/cbmem.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cbmem.c
+ *
+ * Driver for exporting cbmem entries in sysfs.
+ *
+ * Copyright 2022 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "coreboot_table.h"
+
+struct cbmem_entry {
+ char *mem_file_buf;
+ u32 size;
+};
+
+static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
+{
+ return dev_get_drvdata(kobj_to_dev(kobj));
+}
+
+static ssize_t mem_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ size_t count)
+{
+ struct cbmem_entry *entry = to_cbmem_entry(kobj);
+
+ return memory_read_from_buffer(buf, count, &pos, entry->mem_file_buf,
+ entry->size);
+}
+
+static ssize_t mem_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ size_t count)
+{
+ struct cbmem_entry *entry = to_cbmem_entry(kobj);
+
+ if (pos < 0 || pos >= entry->size)
+ return -EINVAL;
+ if (count > entry->size - pos)
+ count = entry->size - pos;
+
+ memcpy(entry->mem_file_buf + pos, buf, count);
+ return count;
+}
+static BIN_ATTR_ADMIN_RW(mem, 0);
+
+static ssize_t address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct coreboot_device *cbdev = dev_to_coreboot_device(dev);
+
+ return sysfs_emit(buf, "0x%llx\n", cbdev->cbmem_entry.address);
+}
+static DEVICE_ATTR_RO(address);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct coreboot_device *cbdev = dev_to_coreboot_device(dev);
+
+ return sysfs_emit(buf, "0x%x\n", cbdev->cbmem_entry.entry_size);
+}
+static DEVICE_ATTR_RO(size);
+
+static struct attribute *attrs[] = {
+ &dev_attr_address.attr,
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static struct bin_attribute *bin_attrs[] = {
+ &bin_attr_mem,
+ NULL,
+};
+
+static const struct attribute_group cbmem_entry_group = {
+ .attrs = attrs,
+ .bin_attrs = bin_attrs,
+};
+
+static const struct attribute_group *dev_groups[] = {
+ &cbmem_entry_group,
+ NULL,
+};
+
+static int cbmem_entry_probe(struct coreboot_device *dev)
+{
+ struct cbmem_entry *entry;
+
+ entry = devm_kzalloc(&dev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, entry);
+ entry->mem_file_buf = devm_memremap(&dev->dev, dev->cbmem_entry.address,
+ dev->cbmem_entry.entry_size,
+ MEMREMAP_WB);
+ if (IS_ERR(entry->mem_file_buf))
+ return PTR_ERR(entry->mem_file_buf);
+
+ entry->size = dev->cbmem_entry.entry_size;
+
+ return 0;
+}
+
+static struct coreboot_driver cbmem_entry_driver = {
+ .probe = cbmem_entry_probe,
+ .drv = {
+ .name = "cbmem",
+ .owner = THIS_MODULE,
+ .dev_groups = dev_groups,
+ },
+ .tag = LB_TAG_CBMEM_ENTRY,
+};
+module_coreboot_driver(cbmem_entry_driver);
+
+MODULE_AUTHOR("Jack Rosenthal <jrosenth@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
new file mode 100644
index 0000000000..33ae94745a
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * coreboot_table.c
+ *
+ * Module providing coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "coreboot_table.h"
+
+#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
+#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
+
+static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(drv);
+
+ return device->entry.tag == driver->tag;
+}
+
+static int coreboot_bus_probe(struct device *dev)
+{
+ int ret = -ENODEV;
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+ if (driver->probe)
+ ret = driver->probe(device);
+
+ return ret;
+}
+
+static void coreboot_bus_remove(struct device *dev)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+ if (driver->remove)
+ driver->remove(device);
+}
+
+static struct bus_type coreboot_bus_type = {
+ .name = "coreboot",
+ .match = coreboot_bus_match,
+ .probe = coreboot_bus_probe,
+ .remove = coreboot_bus_remove,
+};
+
+static void coreboot_device_release(struct device *dev)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+
+ kfree(device);
+}
+
+int coreboot_driver_register(struct coreboot_driver *driver)
+{
+ driver->drv.bus = &coreboot_bus_type;
+
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_register);
+
+void coreboot_driver_unregister(struct coreboot_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_unregister);
+
+static int coreboot_table_populate(struct device *dev, void *ptr)
+{
+ int i, ret;
+ void *ptr_entry;
+ struct coreboot_device *device;
+ struct coreboot_table_entry *entry;
+ struct coreboot_table_header *header = ptr;
+
+ ptr_entry = ptr + header->header_bytes;
+ for (i = 0; i < header->table_entries; i++) {
+ entry = ptr_entry;
+
+ if (entry->size < sizeof(*entry)) {
+ dev_warn(dev, "coreboot table entry too small!\n");
+ return -EINVAL;
+ }
+
+ device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ device->dev.parent = dev;
+ device->dev.bus = &coreboot_bus_type;
+ device->dev.release = coreboot_device_release;
+ memcpy(device->raw, ptr_entry, entry->size);
+
+ switch (device->entry.tag) {
+ case LB_TAG_CBMEM_ENTRY:
+ dev_set_name(&device->dev, "cbmem-%08x",
+ device->cbmem_entry.id);
+ break;
+ default:
+ dev_set_name(&device->dev, "coreboot%d", i);
+ break;
+ }
+
+ ret = device_register(&device->dev);
+ if (ret) {
+ put_device(&device->dev);
+ return ret;
+ }
+
+ ptr_entry += entry->size;
+ }
+
+ return 0;
+}
+
+static int coreboot_table_probe(struct platform_device *pdev)
+{
+ resource_size_t len;
+ struct coreboot_table_header *header;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void *ptr;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -EINVAL;
+
+ /* Check just the header first to make sure things are sane */
+ header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+ if (!header)
+ return -ENOMEM;
+
+ len = header->header_bytes + header->table_bytes;
+ ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+ memunmap(header);
+ if (ret) {
+ dev_warn(dev, "coreboot table missing or corrupt!\n");
+ return -ENODEV;
+ }
+
+ ptr = memremap(res->start, len, MEMREMAP_WB);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = coreboot_table_populate(dev, ptr);
+
+ memunmap(ptr);
+
+ return ret;
+}
+
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+ bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+ { "GOOGCB00", 0 },
+ { "BOOT0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+ .probe = coreboot_table_probe,
+ .remove = coreboot_table_remove,
+ .driver = {
+ .name = "coreboot_table",
+ .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+ .of_match_table = of_match_ptr(coreboot_of_match),
+ },
+};
+
+static int __init coreboot_table_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&coreboot_table_driver);
+ if (ret) {
+ bus_unregister(&coreboot_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+ platform_driver_unregister(&coreboot_table_driver);
+ bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
new file mode 100644
index 0000000000..d814dca33a
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * coreboot_table.h
+ *
+ * Internal header for coreboot table access.
+ *
+ * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef __COREBOOT_TABLE_H
+#define __COREBOOT_TABLE_H
+
+#include <linux/device.h>
+
+/* Coreboot table header structure */
+struct coreboot_table_header {
+ char signature[4];
+ u32 header_bytes;
+ u32 header_checksum;
+ u32 table_bytes;
+ u32 table_checksum;
+ u32 table_entries;
+};
+
+/* List of coreboot entry structures that are used */
+/* Generic */
+struct coreboot_table_entry {
+ u32 tag;
+ u32 size;
+};
+
+/* Points to a CBMEM entry */
+struct lb_cbmem_ref {
+ u32 tag;
+ u32 size;
+
+ u64 cbmem_addr;
+};
+
+#define LB_TAG_CBMEM_ENTRY 0x31
+
+/* Corresponds to LB_TAG_CBMEM_ENTRY */
+struct lb_cbmem_entry {
+ u32 tag;
+ u32 size;
+
+ u64 address;
+ u32 entry_size;
+ u32 id;
+};
+
+/* Describes framebuffer setup by coreboot */
+struct lb_framebuffer {
+ u32 tag;
+ u32 size;
+
+ u64 physical_address;
+ u32 x_resolution;
+ u32 y_resolution;
+ u32 bytes_per_line;
+ u8 bits_per_pixel;
+ u8 red_mask_pos;
+ u8 red_mask_size;
+ u8 green_mask_pos;
+ u8 green_mask_size;
+ u8 blue_mask_pos;
+ u8 blue_mask_size;
+ u8 reserved_mask_pos;
+ u8 reserved_mask_size;
+};
+
+/* A device, additionally with information from coreboot. */
+struct coreboot_device {
+ struct device dev;
+ union {
+ struct coreboot_table_entry entry;
+ struct lb_cbmem_ref cbmem_ref;
+ struct lb_cbmem_entry cbmem_entry;
+ struct lb_framebuffer framebuffer;
+ DECLARE_FLEX_ARRAY(u8, raw);
+ };
+};
+
+static inline struct coreboot_device *dev_to_coreboot_device(struct device *dev)
+{
+ return container_of(dev, struct coreboot_device, dev);
+}
+
+/* A driver for handling devices described in coreboot tables. */
+struct coreboot_driver {
+ int (*probe)(struct coreboot_device *);
+ void (*remove)(struct coreboot_device *);
+ struct device_driver drv;
+ u32 tag;
+};
+
+/* Register a driver that uses the data from a coreboot table. */
+int coreboot_driver_register(struct coreboot_driver *driver);
+
+/* Unregister a driver that uses the data from a coreboot table. */
+void coreboot_driver_unregister(struct coreboot_driver *driver);
+
+/* module_coreboot_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_coreboot_driver(__coreboot_driver) \
+ module_driver(__coreboot_driver, coreboot_driver_register, \
+ coreboot_driver_unregister)
+
+#endif /* __COREBOOT_TABLE_H */
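Drivers on this bus are matched purely by tag, and module_coreboot_driver() removes the init/exit boilerplate. A minimal hypothetical driver built on this header might look like the sketch below; the 0x99 tag and all names are invented for illustration only.

	#include <linux/module.h>

	#include "coreboot_table.h"

	#define LB_TAG_EXAMPLE	0x99	/* invented tag, for illustration only */

	static int example_probe(struct coreboot_device *dev)
	{
		dev_info(&dev->dev, "entry tag %#x, size %u\n",
			 dev->entry.tag, dev->entry.size);
		return 0;
	}

	static struct coreboot_driver example_driver = {
		.probe	= example_probe,
		.drv	= {
			.name = "coreboot-example",
		},
		.tag	= LB_TAG_EXAMPLE,
	};
	module_coreboot_driver(example_driver);

	MODULE_DESCRIPTION("Example coreboot bus driver (illustrative only)");
	MODULE_LICENSE("GPL");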
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
new file mode 100644
index 0000000000..c323a81880
--- /dev/null
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * framebuffer-coreboot.c
+ *
+ * Memory based framebuffer accessed through coreboot table.
+ *
+ * Copyright 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+#define CB_TAG_FRAMEBUFFER 0x12
+
+static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+
+static int framebuffer_probe(struct coreboot_device *dev)
+{
+ int i;
+ u32 length;
+ struct lb_framebuffer *fb = &dev->framebuffer;
+ struct platform_device *pdev;
+ struct resource res;
+ struct simplefb_platform_data pdata = {
+ .width = fb->x_resolution,
+ .height = fb->y_resolution,
+ .stride = fb->bytes_per_line,
+ .format = NULL,
+ };
+
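+ /*
+ * Match the coreboot-reported pixel layout against the simplefb format
+ * table; without an exact match there is no format name to hand to the
+ * simple-framebuffer driver.
+ */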
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (fb->bits_per_pixel == formats[i].bits_per_pixel &&
+ fb->red_mask_pos == formats[i].red.offset &&
+ fb->red_mask_size == formats[i].red.length &&
+ fb->green_mask_pos == formats[i].green.offset &&
+ fb->green_mask_size == formats[i].green.length &&
+ fb->blue_mask_pos == formats[i].blue.offset &&
+ fb->blue_mask_size == formats[i].blue.length)
+ pdata.format = formats[i].name;
+ }
+ if (!pdata.format)
+ return -ENODEV;
+
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.name = "Coreboot Framebuffer";
+ res.start = fb->physical_address;
+ length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
+ res.end = res.start + length - 1;
+ if (res.end <= res.start)
+ return -EINVAL;
+
+ pdev = platform_device_register_resndata(&dev->dev,
+ "simple-framebuffer", 0,
+ &res, 1, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev))
+ pr_warn("coreboot: could not register framebuffer\n");
+ else
+ dev_set_drvdata(&dev->dev, pdev);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static void framebuffer_remove(struct coreboot_device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(&dev->dev);
+
+ platform_device_unregister(pdev);
+}
+
+static struct coreboot_driver framebuffer_driver = {
+ .probe = framebuffer_probe,
+ .remove = framebuffer_remove,
+ .drv = {
+ .name = "framebuffer",
+ },
+ .tag = CB_TAG_FRAMEBUFFER,
+};
+module_coreboot_driver(framebuffer_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
new file mode 100644
index 0000000000..96ea1fa76d
--- /dev/null
+++ b/drivers/firmware/google/gsmi.c
@@ -0,0 +1,1093 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ * Author: dlaurie@google.com (Duncan Laurie)
+ *
+ * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison)
+ *
+ * EFI SMI interface for Google platforms
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/panic_notifier.h>
+#include <linux/ioctl.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/dmi.h>
+#include <linux/kdebug.h>
+#include <linux/reboot.h>
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/ucs2_string.h>
+#include <linux/suspend.h>
+
+#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
+/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
+#define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */
+#define GSMI_SHUTDOWN_PANIC 2 /* Panic */
+#define GSMI_SHUTDOWN_OOPS 3 /* Oops */
+#define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */
+#define GSMI_SHUTDOWN_MCE 5 /* Machine Check */
+#define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */
+#define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */
+#define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */
+
+#define DRIVER_VERSION "1.0"
+#define GSMI_GUID_SIZE 16
+#define GSMI_BUF_SIZE 1024
+#define GSMI_BUF_ALIGN sizeof(u64)
+#define GSMI_CALLBACK 0xef
+
+/* SMI return codes */
+#define GSMI_SUCCESS 0x00
+#define GSMI_UNSUPPORTED2 0x03
+#define GSMI_LOG_FULL 0x0b
+#define GSMI_VAR_NOT_FOUND 0x0e
+#define GSMI_HANDSHAKE_SPIN 0x7d
+#define GSMI_HANDSHAKE_CF 0x7e
+#define GSMI_HANDSHAKE_NONE 0x7f
+#define GSMI_INVALID_PARAMETER 0x82
+#define GSMI_UNSUPPORTED 0x83
+#define GSMI_BUFFER_TOO_SMALL 0x85
+#define GSMI_NOT_READY 0x86
+#define GSMI_DEVICE_ERROR 0x87
+#define GSMI_NOT_FOUND 0x8e
+
+#define QUIRKY_BOARD_HASH 0x78a30a50
+
+/* Internally used commands passed to the firmware */
+#define GSMI_CMD_GET_NVRAM_VAR 0x01
+#define GSMI_CMD_GET_NEXT_VAR 0x02
+#define GSMI_CMD_SET_NVRAM_VAR 0x03
+#define GSMI_CMD_SET_EVENT_LOG 0x08
+#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND 0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
+#define GSMI_CMD_CLEAR_CONFIG 0x20
+#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
+#define GSMI_CMD_RESERVED 0xff
+
+/* Magic entry type for kernel events */
+#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
+
+/* SMI buffers must be in 32bit physical address space */
+struct gsmi_buf {
+ u8 *start; /* start of buffer */
+ size_t length; /* length of buffer */
+ u32 address; /* physical address of buffer */
+};
+
+static struct gsmi_device {
+ struct platform_device *pdev; /* platform device */
+ struct gsmi_buf *name_buf; /* variable name buffer */
+ struct gsmi_buf *data_buf; /* generic data buffer */
+ struct gsmi_buf *param_buf; /* parameter buffer */
+ spinlock_t lock; /* serialize access to SMIs */
+ u16 smi_cmd; /* SMI command port */
+ int handshake_type; /* firmware handler interlock type */
+ struct kmem_cache *mem_pool; /* kmem cache for gsmi_buf allocations */
+} gsmi_dev;
+
+/* Packed structures for communicating with the firmware */
+struct gsmi_nvram_var_param {
+ efi_guid_t guid;
+ u32 name_ptr;
+ u32 attributes;
+ u32 data_len;
+ u32 data_ptr;
+} __packed;
+
+struct gsmi_get_next_var_param {
+ u8 guid[GSMI_GUID_SIZE];
+ u32 name_ptr;
+ u32 name_len;
+} __packed;
+
+struct gsmi_set_eventlog_param {
+ u32 data_ptr;
+ u32 data_len;
+ u32 type;
+} __packed;
+
+/* Event log formats */
+struct gsmi_log_entry_type_1 {
+ u16 type;
+ u32 instance;
+} __packed;
+
+/*
+ * Some platforms don't have explicit SMI handshake
+ * and need to wait for SMI to complete.
+ */
+#define GSMI_DEFAULT_SPINCOUNT 0x10000
+static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT;
+module_param(spincount, uint, 0600);
+MODULE_PARM_DESC(spincount,
+ "The number of loop iterations to use when using the spin handshake.");
+
+/*
+ * Some older platforms with Apollo Lake chipsets do not support S0ix logging
+ * in their GSMI handlers, and behaved poorly when resuming via power button
+ * press if the logging was attempted. Updated firmware with proper behavior
+ * has long since shipped, removing the need for this opt-in parameter. It
+ * now exists as an opt-out parameter for folks defiantly running old
+ * firmware, or for unforeseen circumstances. After the change from opt-in to
+ * opt-out has baked sufficiently, this parameter should probably be removed
+ * entirely.
+ */
+static bool s0ix_logging_enable = true;
+module_param(s0ix_logging_enable, bool, 0600);
+
+static struct gsmi_buf *gsmi_buf_alloc(void)
+{
+ struct gsmi_buf *smibuf;
+
+ smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL);
+ if (!smibuf) {
+ printk(KERN_ERR "gsmi: out of memory\n");
+ return NULL;
+ }
+
+ /* allocate buffer in 32bit address space */
+ smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL);
+ if (!smibuf->start) {
+ printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+ kfree(smibuf);
+ return NULL;
+ }
+
+ /* fill in the buffer handle */
+ smibuf->length = GSMI_BUF_SIZE;
+ smibuf->address = (u32)virt_to_phys(smibuf->start);
+
+ return smibuf;
+}
+
+static void gsmi_buf_free(struct gsmi_buf *smibuf)
+{
+ if (smibuf) {
+ if (smibuf->start)
+ kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
+ kfree(smibuf);
+ }
+}
+
+/*
+ * Make a call to gsmi func(sub). GSMI error codes are translated to
+ * in-kernel errnos (0 on success, -ERRNO on error).
+ */
+static int gsmi_exec(u8 func, u8 sub)
+{
+ u16 cmd = (sub << 8) | func;
+ u16 result = 0;
+ int rc = 0;
+
+ /*
+ * AH : Subfunction number
+ * AL : Function number
+ * EBX : Parameter block address
+ * DX : SMI command port
+ *
+ * Three protocols here. See also the comment in gsmi_init().
+ */
+ if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) {
+ /*
+ * If handshake_type == HANDSHAKE_CF then set CF on the
+ * way in and wait for the handler to clear it; this avoids
+ * corrupting register state on those chipsets which have
+ * a delay between writing the SMI trigger register and
+ * entering SMM.
+ */
+ asm volatile (
+ "stc\n"
+ "outb %%al, %%dx\n"
+ "1: jc 1b\n"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address)
+ : "memory", "cc"
+ );
+ } else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) {
+ /*
+ * If handshake_type == HANDSHAKE_SPIN we spin a
+ * hundred-ish usecs to ensure the SMI has triggered.
+ */
+ asm volatile (
+ "outb %%al, %%dx\n"
+ "1: loop 1b\n"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address),
+ "c" (spincount)
+ : "memory", "cc"
+ );
+ } else {
+ /*
+ * If handshake_type == HANDSHAKE_NONE we do nothing;
+ * either we don't need to or it's legacy firmware that
+ * doesn't understand the CF protocol.
+ */
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address)
+ : "memory", "cc"
+ );
+ }
+
+ /* check return code from SMI handler */
+ switch (result) {
+ case GSMI_SUCCESS:
+ break;
+ case GSMI_VAR_NOT_FOUND:
+ /* not really an error, but let the caller know */
+ rc = 1;
+ break;
+ case GSMI_INVALID_PARAMETER:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd);
+ rc = -EINVAL;
+ break;
+ case GSMI_BUFFER_TOO_SMALL:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd);
+ rc = -ENOMEM;
+ break;
+ case GSMI_UNSUPPORTED:
+ case GSMI_UNSUPPORTED2:
+ if (sub != GSMI_CMD_HANDSHAKE_TYPE)
+ printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n",
+ cmd);
+ rc = -ENOSYS;
+ break;
+ case GSMI_NOT_READY:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd);
+ rc = -EBUSY;
+ break;
+ case GSMI_DEVICE_ERROR:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd);
+ rc = -EFAULT;
+ break;
+ case GSMI_NOT_FOUND:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd);
+ rc = -ENOENT;
+ break;
+ case GSMI_LOG_FULL:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd);
+ rc = -ENOSPC;
+ break;
+ case GSMI_HANDSHAKE_CF:
+ case GSMI_HANDSHAKE_SPIN:
+ case GSMI_HANDSHAKE_NONE:
+ rc = result;
+ break;
+ default:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n",
+ cmd, result);
+ rc = -ENXIO;
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_EFI
+
+static struct efivars efivars;
+
+static efi_status_t gsmi_get_variable(efi_char16_t *name,
+ efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size,
+ void *data)
+{
+ struct gsmi_nvram_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .data_ptr = gsmi_dev.data_buf->address,
+ .data_len = (u32)*data_size,
+ };
+ efi_status_t ret = EFI_SUCCESS;
+ unsigned long flags;
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+ int rc;
+
+ if (name_len >= GSMI_BUF_SIZE / 2)
+ return EFI_BAD_BUFFER_SIZE;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* Vendor guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+ memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Get Variable failed\n");
+ ret = EFI_LOAD_ERROR;
+ } else if (rc == 1) {
+ /* variable was not found */
+ ret = EFI_NOT_FOUND;
+ } else {
+ /* Get the arguments back */
+ memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+ /* The size reported is the min of all of our buffers */
+ *data_size = min_t(unsigned long, *data_size,
+ gsmi_dev.data_buf->length);
+ *data_size = min_t(unsigned long, *data_size, param.data_len);
+
+ /* Copy data back to return buffer. */
+ memcpy(data, gsmi_dev.data_buf->start, *data_size);
+
+ /* All variables have the following attributes */
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ struct gsmi_get_next_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .name_len = gsmi_dev.name_buf->length,
+ };
+ efi_status_t ret = EFI_SUCCESS;
+ int rc;
+ unsigned long flags;
+
+ /* For the moment, only support buffers that exactly match in size */
+ if (*name_size != GSMI_BUF_SIZE)
+ return EFI_BAD_BUFFER_SIZE;
+
+ /* Let's make sure the thing is at least null-terminated */
+ if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
+ return EFI_INVALID_PARAMETER;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memcpy(gsmi_dev.name_buf->start, name, *name_size);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Get Next Variable Name failed\n");
+ ret = EFI_LOAD_ERROR;
+ } else if (rc == 1) {
+ /* variable not found -- end of list */
+ ret = EFI_NOT_FOUND;
+ } else {
+ /* copy variable data back to return buffer */
+ memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+ /* Copy the name back */
+ memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
+ *name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2;
+
+ /* copy guid to return buffer */
+ memcpy(vendor, &param.guid, sizeof(param.guid));
+ ret = EFI_SUCCESS;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static efi_status_t gsmi_set_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 attr,
+ unsigned long data_size,
+ void *data)
+{
+ struct gsmi_nvram_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .data_ptr = gsmi_dev.data_buf->address,
+ .data_len = (u32)data_size,
+ .attributes = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ };
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+ efi_status_t ret = EFI_SUCCESS;
+ int rc;
+ unsigned long flags;
+
+ if (name_len >= GSMI_BUF_SIZE / 2)
+ return EFI_BAD_BUFFER_SIZE;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+ memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, data, data_size);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Set Variable failed\n");
+ ret = EFI_INVALID_PARAMETER;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static const struct efivar_operations efivar_ops = {
+ .get_variable = gsmi_get_variable,
+ .set_variable = gsmi_set_variable,
+ .get_next_variable = gsmi_get_next_variable,
+};
+
+#endif /* CONFIG_EFI */
+
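+/*
+ * Userspace appends an entry by writing the raw record to this sysfs file:
+ * the first u32 (native byte order) selects the event log entry type and the
+ * remaining bytes are the payload handed to the firmware, e.g. (illustrative
+ * only):
+ *   printf '\x01\x00\x00\x00payload' > /sys/firmware/gsmi/append_to_eventlog
+ */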
+static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct gsmi_set_eventlog_param param = {
+ .data_ptr = gsmi_dev.data_buf->address,
+ };
+ int rc = 0;
+ unsigned long flags;
+
+ /* Pull the type out */
+ if (count < sizeof(u32))
+ return -EINVAL;
+ param.type = *(u32 *)buf;
+ buf += sizeof(u32);
+
+ /* The remaining buffer is the data payload */
+ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
+ return -EINVAL;
+ param.data_len = count - sizeof(u32);
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, buf, param.data_len);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+ if (rc < 0)
+ printk(KERN_ERR "gsmi: Set Event Log failed\n");
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return (rc == 0) ? count : rc;
+
+}
+
+static struct bin_attribute eventlog_bin_attr = {
+ .attr = {.name = "append_to_eventlog", .mode = 0200},
+ .write = eventlog_write,
+};
+
+static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned long flags;
+ unsigned long val;
+ struct {
+ u32 percentage;
+ u32 data_type;
+ } param;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ /*
+ * Value entered is a percentage, 0 through 100, anything else
+ * is invalid.
+ */
+ if (val > 100)
+ return -EINVAL;
+
+ /* data_type here selects the smbios event log. */
+ param.percentage = val;
+ param.data_type = 0;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc)
+ return rc;
+ return count;
+}
+
+static struct kobj_attribute gsmi_clear_eventlog_attr = {
+ .attr = {.name = "clear_eventlog", .mode = 0200},
+ .store = gsmi_clear_eventlog_store,
+};
+
+static ssize_t gsmi_clear_config_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* clear parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc)
+ return rc;
+ return count;
+}
+
+static struct kobj_attribute gsmi_clear_config_attr = {
+ .attr = {.name = "clear_config", .mode = 0200},
+ .store = gsmi_clear_config_store,
+};
+
+static const struct attribute *gsmi_attrs[] = {
+ &gsmi_clear_config_attr.attr,
+ &gsmi_clear_eventlog_attr.attr,
+ NULL,
+};
+
+static int gsmi_shutdown_reason(int reason)
+{
+ struct gsmi_log_entry_type_1 entry = {
+ .type = GSMI_LOG_ENTRY_TYPE_KERNEL,
+ .instance = reason,
+ };
+ struct gsmi_set_eventlog_param param = {
+ .data_len = sizeof(entry),
+ .type = 1,
+ };
+ static int saved_reason;
+ int rc = 0;
+ unsigned long flags;
+
+ /* avoid duplicate entries in the log */
+ if (saved_reason & (1 << reason))
+ return 0;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ saved_reason |= (1 << reason);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry));
+
+ /* parameter buffer */
+ param.data_ptr = gsmi_dev.data_buf->address;
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc < 0)
+ printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n");
+ else
+ printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n",
+ reason);
+
+ return rc;
+}
+
+static int gsmi_reboot_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_reboot_notifier = {
+ .notifier_call = gsmi_reboot_callback
+};
+
+static int gsmi_die_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ if (reason == DIE_OOPS)
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_die_notifier = {
+ .notifier_call = gsmi_die_callback
+};
+
+static int gsmi_panic_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_panic_notifier = {
+ .notifier_call = gsmi_panic_callback,
+};
+
+/*
+ * This hash function was blatantly copied from include/linux/hash.h.
+ * It is used by this driver to obfuscate a board name that requires a
+ * quirk within this driver.
+ *
+ * Please do not remove this copy of the function as any changes to the
+ * global utility hash_64() function would break this driver's ability
+ * to identify a board and provide the appropriate quirk -- mikew@google.com
+ */
+static u64 __init local_hash_64(u64 val, unsigned bits)
+{
+ u64 hash = val;
+
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ u64 n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+
+ /* High bits are more random, so use them. */
+ return hash >> (64 - bits);
+}
+
+static u32 __init hash_oem_table_id(char s[8])
+{
+ u64 input;
+ memcpy(&input, s, 8);
+ return local_hash_64(input, 32);
+}
+
+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
+ {
+ .ident = "Google Board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+ },
+ },
+ {
+ .ident = "Coreboot Firmware",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
+
+static __init int gsmi_system_valid(void)
+{
+ u32 hash;
+ u16 cmd, result;
+
+ if (!dmi_check_system(gsmi_dmi_table))
+ return -ENODEV;
+
+ /*
+ * Only newer firmware supports the gsmi interface. All older
+ * firmware that didn't support this interface used to plug the
+ * table name in the first four bytes of the oem_table_id field.
+ * Newer firmware doesn't do that though, so use that as the
+ * discriminant factor. We have to do this in order to
+ * whitewash our board names out of the public driver.
+ */
+ if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) {
+ printk(KERN_INFO "gsmi: Board is too old\n");
+ return -ENODEV;
+ }
+
+ /* Disable on board with 1.0 BIOS due to Google bug 2602657 */
+ hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id);
+ if (hash == QUIRKY_BOARD_HASH) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ if (strncmp(bios_ver, "1.0", 3) == 0) {
+ pr_info("gsmi: disabled on this board's BIOS %s\n",
+ bios_ver);
+ return -ENODEV;
+ }
+ }
+
+ /* check for valid SMI command port in ACPI FADT */
+ if (acpi_gbl_FADT.smi_command == 0) {
+ pr_info("gsmi: missing smi_command\n");
+ return -ENODEV;
+ }
+
+ /* Test the smihandler with a bogus command. If it leaves the
+ * calling argument in %ax untouched, there is no handler for
+ * GSMI commands.
+ */
+ cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (acpi_gbl_FADT.smi_command)
+ : "memory", "cc"
+ );
+ if (cmd == result) {
+ pr_info("gsmi: no gsmi handler in firmware\n");
+ return -ENODEV;
+ }
+
+ /* Found */
+ return 0;
+}
+
+static struct kobject *gsmi_kobj;
+
+static const struct platform_device_info gsmi_dev_info = {
+ .name = "gsmi",
+ .id = -1,
+ /* SMI callbacks require 32bit addresses */
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+ unsigned long flags;
+
+ /*
+ * If platform has not enabled S0ix logging, then no action is
+ * necessary.
+ */
+ if (!s0ix_logging_enable)
+ return;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ gsmi_exec(GSMI_CALLBACK, cmd);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+ /*
+ * If system is not suspending via firmware using the standard ACPI Sx
+ * types, then make a GSMI call to log the suspend info.
+ */
+ if (!pm_suspend_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+ /*
+ * Always return success, since we do not want suspend
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+ /*
+ * If system did not resume via firmware, then make a GSMI call to log
+ * the resume info and wake source.
+ */
+ if (!pm_resume_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+ /*
+ * Always return success, since we do not want resume
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+ .suspend_noirq = gsmi_log_s0ix_suspend,
+ .resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+ .driver = {
+ .name = "gsmi",
+ .pm = &gsmi_pm_ops,
+ },
+ .probe = gsmi_platform_driver_probe,
+};
+#endif
+
+static __init int gsmi_init(void)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = gsmi_system_valid();
+ if (ret)
+ return ret;
+
+ gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
+
+#ifdef CONFIG_PM
+ ret = platform_driver_register(&gsmi_driver_info);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "gsmi: unable to register platform driver\n");
+ return ret;
+ }
+#endif
+
+ /* register device */
+ gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
+ if (IS_ERR(gsmi_dev.pdev)) {
+ printk(KERN_ERR "gsmi: unable to register platform device\n");
+ return PTR_ERR(gsmi_dev.pdev);
+ }
+
+ /* SMI access needs to be serialized */
+ spin_lock_init(&gsmi_dev.lock);
+
+ ret = -ENOMEM;
+
+ /*
+ * SLAB cache is created using SLAB_CACHE_DMA32 to ensure that the
+ * allocations for gsmi_buf come from the DMA32 memory zone. These
+ * buffers have nothing to do with DMA. They are required for
+ * communication with firmware executing in SMI mode which can only
+ * access the bottom 4GiB of physical memory. Since DMA32 memory zone
+ * guarantees allocation under the 4GiB boundary, this driver creates
+ * a SLAB cache with SLAB_CACHE_DMA32 flag.
+ */
+ gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE,
+ GSMI_BUF_ALIGN,
+ SLAB_CACHE_DMA32, NULL);
+ if (!gsmi_dev.mem_pool)
+ goto out_err;
+
+ /*
+ * pre-allocate buffers because sometimes we are called when
+ * this is not feasible: oops, panic, die, mce, etc
+ */
+ gsmi_dev.name_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.name_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+ goto out_err;
+ }
+
+ gsmi_dev.data_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.data_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate data buffer\n");
+ goto out_err;
+ }
+
+ gsmi_dev.param_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.param_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate param buffer\n");
+ goto out_err;
+ }
+
+ /*
+ * Determine type of handshake used to serialize the SMI
+ * entry. See also gsmi_exec().
+ *
+ * There's a "behavior" present on some chipsets where writing the
+ * SMI trigger register in the southbridge doesn't result in an
+ * immediate SMI. Rather, the processor can execute "a few" more
+ * instructions before the SMI takes effect. To ensure synchronous
+ * behavior, implement a handshake between the kernel driver and the
+ * firmware handler to spin until released. This ioctl determines
+ * the type of handshake.
+ *
+ * NONE: The firmware handler does not implement any
+ * handshake. Either it doesn't need to, or it's legacy firmware
+ * that doesn't know it needs to and never will.
+ *
+ * CF: The firmware handler will clear the CF in the saved
+ * state before returning. The driver may set the CF and test for
+ * it to clear before proceeding.
+ *
+ * SPIN: The firmware handler does not implement any handshake
+ * but the driver should spin for a hundred or so microseconds
+ * to ensure the SMI has triggered.
+ *
+ * Finally, the handler will return -ENOSYS if
+ * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies
+ * HANDSHAKE_NONE.
+ */
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+ gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN;
+ gsmi_dev.handshake_type =
+ gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE);
+ if (gsmi_dev.handshake_type == -ENOSYS)
+ gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE;
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ /* Remove and clean up gsmi if the handshake could not complete. */
+ if (gsmi_dev.handshake_type == -ENXIO) {
+ printk(KERN_INFO "gsmi version " DRIVER_VERSION
+ " failed to load\n");
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+ /* Register in the firmware directory */
+ ret = -ENOMEM;
+ gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
+ if (!gsmi_kobj) {
+ printk(KERN_INFO "gsmi: Failed to create firmware kobj\n");
+ goto out_err;
+ }
+
+ /* Setup eventlog access */
+ ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to setup eventlog");
+ goto out_err;
+ }
+
+ /* Other attributes */
+ ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to add attrs");
+ goto out_remove_bin_file;
+ }
+
+#ifdef CONFIG_EFI
+ ret = efivars_register(&efivars, &efivar_ops);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to register efivars\n");
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ goto out_remove_bin_file;
+ }
+#endif
+
+ register_reboot_notifier(&gsmi_reboot_notifier);
+ register_die_notifier(&gsmi_die_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &gsmi_panic_notifier);
+
+ printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
+
+ return 0;
+
+out_remove_bin_file:
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+out_err:
+ kobject_put(gsmi_kobj);
+ gsmi_buf_free(gsmi_dev.param_buf);
+ gsmi_buf_free(gsmi_dev.data_buf);
+ gsmi_buf_free(gsmi_dev.name_buf);
+ kmem_cache_destroy(gsmi_dev.mem_pool);
+ platform_device_unregister(gsmi_dev.pdev);
+ pr_info("gsmi: failed to load: %d\n", ret);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
+ return ret;
+}
+
+static void __exit gsmi_exit(void)
+{
+ unregister_reboot_notifier(&gsmi_reboot_notifier);
+ unregister_die_notifier(&gsmi_die_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &gsmi_panic_notifier);
+#ifdef CONFIG_EFI
+ efivars_unregister(&efivars);
+#endif
+
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+ kobject_put(gsmi_kobj);
+ gsmi_buf_free(gsmi_dev.param_buf);
+ gsmi_buf_free(gsmi_dev.data_buf);
+ gsmi_buf_free(gsmi_dev.name_buf);
+ kmem_cache_destroy(gsmi_dev.mem_pool);
+ platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
+}
+
+module_init(gsmi_init);
+module_exit(gsmi_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
new file mode 100644
index 0000000000..74b5286518
--- /dev/null
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole-coreboot.c
+ *
+ * Memory based BIOS console accessed through coreboot table.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+#include "coreboot_table.h"
+
+#define CB_TAG_CBMEM_CONSOLE 0x17
+
+/* CBMEM firmware console log descriptor. */
+struct cbmem_cons {
+ u32 size_dont_access_after_boot;
+ u32 cursor;
+ u8 body[];
+} __packed;
+
+#define CURSOR_MASK ((1 << 28) - 1)
+#define OVERFLOW (1 << 31)
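+/*
+ * The cursor field packs the current write offset in its low 28 bits; the
+ * high bits carry flags such as the overflow indicator defined above.
+ */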
+
+static struct cbmem_cons *cbmem_console;
+static u32 cbmem_console_size;
+
+/*
+ * The cbmem_console structure is read again on every access because it may
+ * change at any time if runtime firmware logs new messages. This may rarely
+ * lead to race conditions where the firmware overwrites the beginning of the
+ * ring buffer with more lines after we have already read |cursor|. It should be
+ * rare and harmless enough that we don't spend extra effort working around it.
+ */
+static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
+{
+ u32 cursor = cbmem_console->cursor & CURSOR_MASK;
+ u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
+ u32 size = cbmem_console_size;
+ struct seg { /* describes ring buffer segments in logical order */
+ u32 phys; /* physical offset from start of mem buffer */
+ u32 len; /* length of segment */
+ } seg[2] = { {0}, {0} };
+ size_t done = 0;
+ int i;
+
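+ /*
+ * If the log has overflowed, the oldest data begins at the cursor: read
+ * from the cursor to the end of the buffer first, then wrap around to the
+ * start. Otherwise the log is a single segment from offset 0 up to the
+ * cursor.
+ */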
+ if (flags & OVERFLOW) {
+ if (cursor > size) /* Shouldn't really happen, but... */
+ cursor = 0;
+ seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
+ seg[1] = (struct seg){.phys = 0, .len = cursor};
+ } else {
+ seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
+ done += memory_read_from_buffer(buf + done, count - done, &pos,
+ cbmem_console->body + seg[i].phys, seg[i].len);
+ pos -= seg[i].len;
+ }
+ return done;
+}
+
+static int memconsole_probe(struct coreboot_device *dev)
+{
+ struct cbmem_cons *tmp_cbmc;
+
+ tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
+ sizeof(*tmp_cbmc), MEMREMAP_WB);
+
+ if (!tmp_cbmc)
+ return -ENOMEM;
+
+ /* Read size only once to prevent overrun attack through /dev/mem. */
+ cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
+ cbmem_console = devm_memremap(&dev->dev, dev->cbmem_ref.cbmem_addr,
+ cbmem_console_size + sizeof(*cbmem_console),
+ MEMREMAP_WB);
+ memunmap(tmp_cbmc);
+
+ if (IS_ERR(cbmem_console))
+ return PTR_ERR(cbmem_console);
+
+ memconsole_setup(memconsole_coreboot_read);
+
+ return memconsole_sysfs_init();
+}
+
+static void memconsole_remove(struct coreboot_device *dev)
+{
+ memconsole_exit();
+}
+
+static struct coreboot_driver memconsole_driver = {
+ .probe = memconsole_probe,
+ .remove = memconsole_remove,
+ .drv = {
+ .name = "memconsole",
+ },
+ .tag = CB_TAG_CBMEM_CONSOLE,
+};
+module_coreboot_driver(memconsole_driver);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
new file mode 100644
index 0000000000..3d3c4f6b81
--- /dev/null
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole-x86-legacy.c
+ *
+ * EBDA specific parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/mm.h>
+#include <asm/bios_ebda.h>
+#include <linux/acpi.h>
+
+#include "memconsole.h"
+
+#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
+#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
+
+struct biosmemcon_ebda {
+ u32 signature;
+ union {
+ struct {
+ u8 enabled;
+ u32 buffer_addr;
+ u16 start;
+ u16 end;
+ u16 num_chars;
+ u8 wrapped;
+ } __packed v1;
+ struct {
+ u32 buffer_addr;
+ /* Misdocumented as number of pages! */
+ u16 num_bytes;
+ u16 start;
+ u16 end;
+ } __packed v2;
+ };
+} __packed;
+
+static char *memconsole_baseaddr;
+static size_t memconsole_length;
+
+static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+ memconsole_length);
+}
+
+static void found_v1_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num = %d\n",
+ hdr->v1.buffer_addr, hdr->v1.start,
+ hdr->v1.end, hdr->v1.num_chars);
+
+ memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+ memconsole_length = hdr->v1.num_chars;
+ memconsole_setup(memconsole_read);
+}
+
+static void found_v2_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v2 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num_bytes = %d\n",
+ hdr->v2.buffer_addr, hdr->v2.start,
+ hdr->v2.end, hdr->v2.num_bytes);
+
+ memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
+ memconsole_length = hdr->v2.end - hdr->v2.start;
+ memconsole_setup(memconsole_read);
+}
+
+/*
+ * Search through the EBDA for the BIOS Memory Console, and
+ * set the global variables to point to it. Return true if found.
+ */
+static bool memconsole_ebda_init(void)
+{
+ unsigned int address;
+ size_t length, cur;
+
+ address = get_bios_ebda();
+ if (!address) {
+ pr_info("memconsole: BIOS EBDA non-existent.\n");
+ return false;
+ }
+
+ /* EBDA length is byte 0 of EBDA (in KB) */
+ length = *(u8 *)phys_to_virt(address);
+ length <<= 10; /* convert to bytes */
+
+ /*
+ * Search through EBDA for BIOS memory console structure
+ * note: signature is not necessarily dword-aligned
+ */
+ for (cur = 0; cur < length; cur++) {
+ struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
+
+ /* memconsole v1 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
+ found_v1_header(hdr);
+ return true;
+ }
+
+ /* memconsole v2 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
+ found_v2_header(hdr);
+ return true;
+ }
+ }
+
+ pr_info("memconsole: BIOS console EBDA structure not found!\n");
+ return false;
+}
+
+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
+ {
+ .ident = "Google Board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
+
+static bool __init memconsole_find(void)
+{
+ if (!dmi_check_system(memconsole_dmi_table))
+ return false;
+
+ return memconsole_ebda_init();
+}
+
+static int __init memconsole_x86_init(void)
+{
+ if (!memconsole_find())
+ return -ENODEV;
+
+ return memconsole_sysfs_init();
+}
+
+static void __exit memconsole_x86_exit(void)
+{
+ memconsole_exit();
+}
+
+module_init(memconsole_x86_init);
+module_exit(memconsole_x86_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
new file mode 100644
index 0000000000..44d314ad69
--- /dev/null
+++ b/drivers/firmware/google/memconsole.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole.c
+ *
+ * Architecture-independent parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+
+static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
+
+ memconsole_read_func = bin_attr->private;
+ if (WARN_ON_ONCE(!memconsole_read_func))
+ return -EIO;
+
+ return memconsole_read_func(buf, pos, count);
+}
+
+static struct bin_attribute memconsole_bin_attr = {
+ .attr = {.name = "log", .mode = 0444},
+ .read = memconsole_read,
+};
+
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
+{
+ memconsole_bin_attr.private = read_func;
+}
+EXPORT_SYMBOL(memconsole_setup);
+
+int memconsole_sysfs_init(void)
+{
+ return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_sysfs_init);
+
+void memconsole_exit(void)
+{
+ sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
new file mode 100644
index 0000000000..aaff2b72b6
--- /dev/null
+++ b/drivers/firmware/google/memconsole.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * memconsole.h
+ *
+ * Internal headers of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
+#define __FIRMWARE_GOOGLE_MEMCONSOLE_H
+
+#include <linux/types.h>
+
+/*
+ * memconsole_setup
+ *
+ * Initialize the memory console, passing the function to handle read accesses.
+ */
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t));
+
+/*
+ * memconsole_sysfs_init
+ *
+ * Create the binary sysfs file ("log") for the memory console under the
+ * firmware kobject.
+ */
+int memconsole_sysfs_init(void);
+
+/* memconsole_exit
+ *
+ * Unmap the console buffer.
+ */
+void memconsole_exit(void);
+
+#endif /* __FIRMWARE_GOOGLE_MEMCONSOLE_H */
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
new file mode 100644
index 0000000000..ee6e08c059
--- /dev/null
+++ b/drivers/firmware/google/vpd.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpd.c
+ *
+ * Driver for exporting VPD content to sysfs.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "coreboot_table.h"
+#include "vpd_decode.h"
+
+#define CB_TAG_VPD 0x2c
+#define VPD_CBMEM_MAGIC 0x43524f53
+
+static struct kobject *vpd_kobj;
+
+struct vpd_cbmem {
+ u32 magic;
+ u32 version;
+ u32 ro_size;
+ u32 rw_size;
+ u8 blob[];
+};
+
+struct vpd_section {
+ bool enabled;
+ const char *name;
+ char *raw_name; /* the string name_raw */
+ struct kobject *kobj; /* vpd/name directory */
+ char *baseaddr;
+ struct bin_attribute bin_attr; /* vpd/name_raw bin_attribute */
+ struct list_head attribs; /* key/value in vpd_attrib_info list */
+};
+
+struct vpd_attrib_info {
+ char *key;
+ const char *value;
+ struct bin_attribute bin_attr;
+ struct list_head list;
+};
+
+static struct vpd_section ro_vpd;
+static struct vpd_section rw_vpd;
+
+static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_attrib_info *info = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, info->value,
+ info->bin_attr.size);
+}
+
+/*
+ * vpd_section_check_key_name()
+ *
+ * The VPD specification supports only [a-zA-Z0-9_]+ characters in key names,
+ * but old firmware versions may contain entries such as "S/N" which are
+ * problematic when exported as sysfs attributes. Such keys, when present in
+ * old firmware, are ignored.
+ *
+ * Returns VPD_OK for a valid key name, VPD_FAIL otherwise.
+ *
+ * @key: The key name to check
+ * @key_len: key name length
+ */
+static int vpd_section_check_key_name(const u8 *key, s32 key_len)
+{
+ int c;
+
+ while (key_len-- > 0) {
+ c = *key++;
+
+ if (!isalnum(c) && c != '_')
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
+
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
+ void *arg)
+{
+ int ret;
+ struct vpd_section *sec = arg;
+ struct vpd_attrib_info *info;
+
+ /*
+ * Return VPD_OK immediately to decode next entry if the current key
+ * name contains invalid characters.
+ */
+ if (vpd_section_check_key_name(key, key_len) != VPD_OK)
+ return VPD_OK;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->key = kstrndup(key, key_len, GFP_KERNEL);
+ if (!info->key) {
+ ret = -ENOMEM;
+ goto free_info;
+ }
+
+ sysfs_bin_attr_init(&info->bin_attr);
+ info->bin_attr.attr.name = info->key;
+ info->bin_attr.attr.mode = 0444;
+ info->bin_attr.size = value_len;
+ info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.private = info;
+
+ info->value = value;
+
+ INIT_LIST_HEAD(&info->list);
+
+ ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
+ if (ret)
+ goto free_info_key;
+
+ list_add_tail(&info->list, &sec->attribs);
+ return 0;
+
+free_info_key:
+ kfree(info->key);
+free_info:
+ kfree(info);
+
+ return ret;
+}
+
+static void vpd_section_attrib_destroy(struct vpd_section *sec)
+{
+ struct vpd_attrib_info *info;
+ struct vpd_attrib_info *temp;
+
+ list_for_each_entry_safe(info, temp, &sec->attribs, list) {
+ sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+ kfree(info->key);
+ kfree(info);
+ }
+}
+
+static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_section *sec = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, sec->baseaddr,
+ sec->bin_attr.size);
+}
+
+static int vpd_section_create_attribs(struct vpd_section *sec)
+{
+ s32 consumed;
+ int ret;
+
+ consumed = 0;
+ do {
+ ret = vpd_decode_string(sec->bin_attr.size, sec->baseaddr,
+ &consumed, vpd_section_attrib_add, sec);
+ } while (ret == VPD_OK);
+
+ return 0;
+}
+
+static int vpd_section_init(const char *name, struct vpd_section *sec,
+ phys_addr_t physaddr, size_t size)
+{
+ int err;
+
+ sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
+ if (!sec->baseaddr)
+ return -ENOMEM;
+
+ sec->name = name;
+
+ /* We want to export the raw partition with name ${name}_raw */
+ sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
+ if (!sec->raw_name) {
+ err = -ENOMEM;
+ goto err_memunmap;
+ }
+
+ sysfs_bin_attr_init(&sec->bin_attr);
+ sec->bin_attr.attr.name = sec->raw_name;
+ sec->bin_attr.attr.mode = 0444;
+ sec->bin_attr.size = size;
+ sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.private = sec;
+
+ err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+ if (err)
+ goto err_free_raw_name;
+
+ sec->kobj = kobject_create_and_add(name, vpd_kobj);
+ if (!sec->kobj) {
+ err = -EINVAL;
+ goto err_sysfs_remove;
+ }
+
+ INIT_LIST_HEAD(&sec->attribs);
+ vpd_section_create_attribs(sec);
+
+ sec->enabled = true;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+err_free_raw_name:
+ kfree(sec->raw_name);
+err_memunmap:
+ memunmap(sec->baseaddr);
+ return err;
+}
+
+static int vpd_section_destroy(struct vpd_section *sec)
+{
+ if (sec->enabled) {
+ vpd_section_attrib_destroy(sec);
+ kobject_put(sec->kobj);
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+ kfree(sec->raw_name);
+ memunmap(sec->baseaddr);
+ sec->enabled = false;
+ }
+
+ return 0;
+}
+
+static int vpd_sections_init(phys_addr_t physaddr)
+{
+ struct vpd_cbmem *temp;
+ struct vpd_cbmem header;
+ int ret = 0;
+
+ temp = memremap(physaddr, sizeof(struct vpd_cbmem), MEMREMAP_WB);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(&header, temp, sizeof(struct vpd_cbmem));
+ memunmap(temp);
+
+ if (header.magic != VPD_CBMEM_MAGIC)
+ return -ENODEV;
+
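+ /*
+ * The RO section immediately follows the VPD CBMEM header and the RW
+ * section follows the RO section, so both offsets are derived from the
+ * sizes reported in the header.
+ */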
+ if (header.ro_size) {
+ ret = vpd_section_init("ro", &ro_vpd,
+ physaddr + sizeof(struct vpd_cbmem),
+ header.ro_size);
+ if (ret)
+ return ret;
+ }
+
+ if (header.rw_size) {
+ ret = vpd_section_init("rw", &rw_vpd,
+ physaddr + sizeof(struct vpd_cbmem) +
+ header.ro_size, header.rw_size);
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vpd_probe(struct coreboot_device *dev)
+{
+ int ret;
+
+ vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+ if (!vpd_kobj)
+ return -ENOMEM;
+
+ ret = vpd_sections_init(dev->cbmem_ref.cbmem_addr);
+ if (ret) {
+ kobject_put(vpd_kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vpd_remove(struct coreboot_device *dev)
+{
+ vpd_section_destroy(&ro_vpd);
+ vpd_section_destroy(&rw_vpd);
+
+ kobject_put(vpd_kobj);
+}
+
+static struct coreboot_driver vpd_driver = {
+ .probe = vpd_probe,
+ .remove = vpd_remove,
+ .drv = {
+ .name = "vpd",
+ },
+ .tag = CB_TAG_VPD,
+};
+module_coreboot_driver(vpd_driver);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
new file mode 100644
index 0000000000..5c6f2a74f1
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpd_decode.c
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include "vpd_decode.h"
+
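+/*
+ * Entry lengths are encoded as a big-endian base-128 varint: each byte
+ * contributes seven bits of the length and its top bit indicates that
+ * another byte follows.
+ */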
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+ u32 *length, u32 *decoded_len)
+{
+ u8 more;
+ int i = 0;
+
+ if (!length || !decoded_len)
+ return VPD_FAIL;
+
+ *length = 0;
+ do {
+ if (i >= max_len)
+ return VPD_FAIL;
+
+ more = in[i] & 0x80;
+ *length <<= 7;
+ *length |= in[i] & 0x7f;
+ ++i;
+ } while (more);
+
+ *decoded_len = i;
+ return VPD_OK;
+}
+
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+ u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+ u32 decoded_len;
+ u32 consumed = *_consumed;
+
+ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+ entry_len, &decoded_len) != VPD_OK)
+ return VPD_FAIL;
+ if (max_len - consumed < decoded_len)
+ return VPD_FAIL;
+
+ consumed += decoded_len;
+ *entry = input_buf + consumed;
+
+ /* entry_len is untrusted data and must be checked again. */
+ if (max_len - consumed < *entry_len)
+ return VPD_FAIL;
+
+ consumed += *entry_len;
+ *_consumed = consumed;
+ return VPD_OK;
+}
+
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg)
+{
+ int type;
+ u32 key_len;
+ u32 value_len;
+ const u8 *key;
+ const u8 *value;
+
+ /* type */
+ if (*consumed >= max_len)
+ return VPD_FAIL;
+
+ type = input_buf[*consumed];
+
+ switch (type) {
+ case VPD_TYPE_INFO:
+ case VPD_TYPE_STRING:
+ (*consumed)++;
+
+ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+ &key_len) != VPD_OK)
+ return VPD_FAIL;
+
+ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+ &value_len) != VPD_OK)
+ return VPD_FAIL;
+
+ if (type == VPD_TYPE_STRING)
+ return callback(key, key_len, value, value_len,
+ callback_arg);
+ break;
+
+ default:
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
new file mode 100644
index 0000000000..8dbe41cac5
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vpd_decode.h
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#ifndef __VPD_DECODE_H
+#define __VPD_DECODE_H
+
+#include <linux/types.h>
+
+enum {
+ VPD_OK = 0,
+ VPD_FAIL,
+};
+
+enum {
+ VPD_TYPE_TERMINATOR = 0,
+ VPD_TYPE_STRING,
+ VPD_TYPE_INFO = 0xfe,
+ VPD_TYPE_IMPLICIT_TERMINATOR = 0xff,
+};
+
+/* Callback for vpd_decode_string to invoke. */
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
+ void *arg);
+
+/*
+ * vpd_decode_string
+ *
+ * Given the encoded buffer, this function extracts one (key, value) entry and
+ * invokes the callback with it. *consumed is incremented by the number of
+ * bytes consumed by this function.
+ *
+ * The input_buf points to the first byte of the input buffer.
+ *
+ * *consumed is the offset of the next byte to be decoded; it starts at 0 and
+ * may be non-zero so the function can be called repeatedly on the same buffer.
+ *
+ * If one entry is successfully decoded, it is passed to the callback and the
+ * callback's return value is returned.
+ */
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg);
+
+#endif /* __VPD_DECODE_H */
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
new file mode 100644
index 0000000000..c027d99f2a
--- /dev/null
+++ b/drivers/firmware/imx/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config IMX_DSP
+ tristate "IMX DSP Protocol driver"
+ depends on IMX_MBOX
+ help
+ This enables the DSP IPC protocol between the host AP (Linux)
+ and the firmware running on the DSP.
+ A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP).
+
+ It acts like a doorbell. Clients may use shared memory to
+ exchange information with the DSP side.
+
+config IMX_SCU
+ bool "IMX SCU Protocol driver"
+ depends on IMX_MBOX
+ select SOC_BUS
+ help
+ The System Controller Firmware (SCFW) is a low-level system function
+ which runs on a dedicated Cortex-M core to provide power, clock, and
+ resource management. It exists on some i.MX8 processors. e.g. i.MX8QM
+ (QM, QP), and i.MX8QX (QXP, DX).
+
+ This driver manages the IPC interface between host CPU and the
+ SCU firmware running on M4.
+
+config IMX_SCU_PD
+ bool "IMX SCU Power Domain driver"
+ depends on IMX_SCU
+ help
+ The System Controller Firmware (SCFW) based power domain driver.
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
new file mode 100644
index 0000000000..8f9f04a513
--- /dev/null
+++ b/drivers/firmware/imx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IMX_DSP) += imx-dsp.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
new file mode 100644
index 0000000000..a48a58e0c6
--- /dev/null
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ *
+ * Implementation of the DSP IPC interface (host side)
+ */
+
+#include <linux/firmware/imx/dsp.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
+ *
+ * @ipc: DSP IPC handle
+ * @idx: index of the channel on which to trigger the interrupt
+ *
+ * Returns 0 on success, a negative value on error
+ */
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
+{
+ int ret;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return -EINVAL;
+
+ dsp_chan = &ipc->chans[idx];
+ ret = mbox_send_message(dsp_chan->ch, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_dsp_ring_doorbell);
+
+/*
+ * imx_dsp_handle_rx - rx callback used by imx mailbox
+ *
+ * @c: mbox client
+ * @msg: message received
+ *
+ * Users of DSP IPC will need to provide handle_reply and handle_request
+ * callbacks.
+ */
+static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
+{
+ struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
+
+ if (chan->idx == 0) {
+ chan->ipc->ops->handle_reply(chan->ipc);
+ } else {
+ chan->ipc->ops->handle_request(chan->ipc);
+ imx_dsp_ring_doorbell(chan->ipc, 1);
+ }
+}
+
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
+{
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return ERR_PTR(-EINVAL);
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
+ return dsp_chan->ch;
+}
+EXPORT_SYMBOL(imx_dsp_request_channel);
+
+void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
+{
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return;
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ mbox_free_channel(dsp_chan->ch);
+}
+EXPORT_SYMBOL(imx_dsp_free_channel);
+
+static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+{
+ struct device *dev = dsp_ipc->dev;
+ struct imx_dsp_chan *dsp_chan;
+ struct mbox_client *cl;
+ char *chan_name;
+ int ret;
+ int i, j;
+
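+ /*
+ * The first two channels are tx doorbells ("txdb%d") and the remaining
+ * ones rx doorbells ("rxdb%d"); idx records whether the rx callback
+ * should treat an incoming message as a reply (0) or a request (1).
+ */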
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ if (i < 2)
+ chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
+ else
+ chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+
+ dsp_chan = &dsp_ipc->chans[i];
+ dsp_chan->name = chan_name;
+ cl = &dsp_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_dsp_handle_rx;
+
+ dsp_chan->ipc = dsp_ipc;
+ dsp_chan->idx = i % 2;
+ dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(dsp_chan->ch)) {
+ ret = PTR_ERR(dsp_chan->ch);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ chan_name, ret);
+ kfree(dsp_chan->name);
+ goto out;
+ }
+
+ dev_dbg(dev, "request mbox chan %s\n", chan_name);
+ }
+
+ return 0;
+out:
+ for (j = 0; j < i; j++) {
+ dsp_chan = &dsp_ipc->chans[j];
+ mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
+ }
+
+ return ret;
+}
+
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ int ret;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ dsp_ipc->dev = dev;
+ dev_set_drvdata(dev, dsp_ipc);
+
+ ret = imx_dsp_setup_channels(dsp_ipc);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return 0;
+}
+
+static int imx_dsp_remove(struct platform_device *pdev)
+{
+ struct imx_dsp_chan *dsp_chan;
+ struct imx_dsp_ipc *dsp_ipc;
+ int i;
+
+ dsp_ipc = dev_get_drvdata(&pdev->dev);
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ dsp_chan = &dsp_ipc->chans[i];
+ mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_dsp_driver = {
+ .driver = {
+ .name = "imx-dsp",
+ },
+ .probe = imx_dsp_probe,
+ .remove = imx_dsp_remove,
+};
+builtin_platform_driver(imx_dsp_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
+MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
+MODULE_LICENSE("GPL v2");
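+
+/*
+ * Illustrative usage sketch (not part of this driver): a DSP client bound
+ * to this IPC device would typically hook up reply/request callbacks and
+ * ring doorbell 0 to notify the DSP.  struct imx_dsp_ops and struct
+ * imx_dsp_ipc are assumed to come from <linux/firmware/imx/dsp.h>; the
+ * client names below are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void my_handle_reply(struct imx_dsp_ipc *ipc)
+{
+	/* parse the reply the DSP placed in shared memory */
+}
+
+static void my_handle_request(struct imx_dsp_ipc *ipc)
+{
+	/* handle an unsolicited request coming from the DSP */
+}
+
+static struct imx_dsp_ops my_dsp_ops = {
+	.handle_reply	= my_handle_reply,
+	.handle_request	= my_handle_request,
+};
+
+static int my_client_attach(struct imx_dsp_ipc *dsp_ipc)
+{
+	dsp_ipc->ops = &my_dsp_ops;
+	/* kick the DSP on channel 0 (txdb0) */
+	return imx_dsp_ring_doorbell(dsp_ipc, 0);
+}
+#endif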
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 0000000000..6125cccc9b
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019,2023 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/kobject.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE 1
+#define IMX_SC_IRQ_FUNC_STATUS 2
+#define IMX_SC_IRQ_NUM_GROUP 9
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ u8 group;
+ u8 reserved;
+ } __packed req;
+ struct {
+ u32 status;
+ } resp;
+ } data;
+};
+
+struct imx_sc_msg_irq_enable {
+ struct imx_sc_rpc_msg hdr;
+ u32 mask;
+ u16 resource;
+ u8 group;
+ u8 enable;
+} __packed;
+
+struct scu_wakeup {
+ u32 mask;
+ u32 wakeup_src;
+ bool valid;
+};
+
+/* Sysfs functions */
+static struct kobject *wakeup_obj;
+static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf);
+static struct kobj_attribute wakeup_source_attr =
+ __ATTR(wakeup_src, 0660, wakeup_source_show, NULL);
+
+static struct scu_wakeup scu_irq_wakeup[IMX_SC_IRQ_NUM_GROUP];
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static BLOCKING_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+ return blocking_notifier_call_chain(&imx_scu_irq_notifier_chain,
+ status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+ u32 irq_status;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ if (scu_irq_wakeup[i].mask) {
+ scu_irq_wakeup[i].valid = false;
+ scu_irq_wakeup[i].wakeup_src = 0;
+ }
+
+ ret = imx_scu_irq_get_status(i, &irq_status);
+ if (ret) {
+ pr_err("get irq group %d status failed, ret %d\n",
+ i, ret);
+ return;
+ }
+
+ if (!irq_status)
+ continue;
+ if (scu_irq_wakeup[i].mask & irq_status) {
+ scu_irq_wakeup[i].valid = true;
+ scu_irq_wakeup[i].wakeup_src = irq_status & scu_irq_wakeup[i].mask;
+ } else {
+ scu_irq_wakeup[i].wakeup_src = irq_status;
+ }
+
+ pm_system_wakeup();
+ imx_scu_irq_notifier_call_chain(irq_status, &i);
+ }
+}
+
+int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ struct imx_sc_msg_irq_get_status msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+ hdr->size = 2;
+
+ msg.data.req.resource = mu_resource_id;
+ msg.data.req.group = group;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ return ret;
+
+ if (irq_status)
+ *irq_status = msg.data.resp.status;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_scu_irq_get_status);
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ struct imx_sc_msg_irq_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ if (!imx_sc_irq_ipc_handle)
+ return -EPROBE_DEFER;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = mu_resource_id;
+ msg.group = group;
+ msg.mask = mask;
+ msg.enable = enable;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+ group, mask, ret);
+
+ if (enable)
+ scu_irq_wakeup[group].mask |= mask;
+ else
+ scu_irq_wakeup[group].mask &= ~mask;
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+ schedule_work(&imx_sc_irq_work);
+}
+
+static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ if (!scu_irq_wakeup[i].wakeup_src)
+ continue;
+
+ if (scu_irq_wakeup[i].valid)
+ sprintf(buf, "Wakeup source group = %d, irq = 0x%x\n",
+ i, scu_irq_wakeup[i].wakeup_src);
+ else
+ sprintf(buf, "Spurious SCU wakeup, group = %d, irq = 0x%x\n",
+ i, scu_irq_wakeup[i].wakeup_src);
+ }
+
+ return strlen(buf);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ struct of_phandle_args spec;
+ struct mbox_client *cl;
+ struct mbox_chan *ch;
+ int ret = 0, i = 0;
+
+ ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+ if (ret)
+ return ret;
+
+ cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = dev;
+ cl->rx_callback = imx_scu_irq_callback;
+
+ /* SCU general IRQ uses general interrupt channel 3 */
+ ch = mbox_request_channel_byname(cl, "gip3");
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+ devm_kfree(dev, cl);
+ return ret;
+ }
+
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec))
+ i = of_alias_get_id(spec.np, "mu");
+
+ /* fall back to MU1 as the general MU IRQ channel if the lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
+ /* Create directory under /sys/firmware */
+ wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
+ if (!wakeup_obj) {
+ ret = -ENOMEM;
+ goto free_ch;
+ }
+
+ ret = sysfs_create_file(wakeup_obj, &wakeup_source_attr.attr);
+ if (ret) {
+ dev_err(dev, "Cannot create wakeup source sysfs file\n");
+ kobject_put(wakeup_obj);
+ goto free_ch;
+ }
+
+ return 0;
+
+free_ch:
+ mbox_free_channel(ch);
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
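+
+/*
+ * Illustrative usage sketch (not part of this driver): a consumer registers
+ * a notifier block and enables the IRQ group/mask it cares about.  The
+ * notifier is called with the group status word as the event and a pointer
+ * to the group number as the data argument (see
+ * imx_scu_irq_notifier_call_chain() above).  The group/mask values below
+ * are placeholders, not real IMX_SC_IRQ_* constants.
+ */
+#if 0	/* example only, never compiled */
+static int my_scu_irq_notify(struct notifier_block *nb,
+			     unsigned long status, void *data)
+{
+	u8 group = *(u8 *)data;
+
+	pr_info("SCU IRQ group %u status 0x%lx\n", group, status);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block my_scu_irq_nb = {
+	.notifier_call = my_scu_irq_notify,
+};
+
+static int my_client_init(void)
+{
+	int ret;
+
+	ret = imx_scu_irq_register_notifier(&my_scu_irq_nb);
+	if (ret)
+		return ret;
+
+	/* enable bit 0 of (placeholder) group 1 as an interrupt/wakeup source */
+	return imx_scu_irq_group_enable(1, BIT(0), true);
+}
+#endif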
diff --git a/drivers/firmware/imx/imx-scu-soc.c b/drivers/firmware/imx/imx-scu-soc.c
new file mode 100644
index 0000000000..4971923205
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-soc.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
+
+struct imx_sc_msg_misc_get_soc_id {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u32 control;
+ u16 resource;
+ } __packed req;
+ struct {
+ u32 id;
+ } resp;
+ } data;
+} __packed __aligned(4);
+
+struct imx_sc_msg_misc_get_soc_uid {
+ struct imx_sc_rpc_msg hdr;
+ u32 uid_low;
+ u32 uid_high;
+} __packed;
+
+static int imx_scu_soc_uid(u64 *soc_uid)
+{
+ struct imx_sc_msg_misc_get_soc_uid msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
+ hdr->size = 1;
+
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ *soc_uid = msg.uid_high;
+ *soc_uid <<= 32;
+ *soc_uid |= msg.uid_low;
+
+ return 0;
+}
+
+static int imx_scu_soc_id(void)
+{
+ struct imx_sc_msg_misc_get_soc_id msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.data.req.control = IMX_SC_C_ID;
+ msg.data.req.resource = IMX_SC_R_SYSTEM;
+
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ return msg.data.resp.id;
+}
+
+static const char *imx_scu_soc_name(u32 id)
+{
+ switch (id) {
+ case 0x1:
+ return "i.MX8QM";
+ case 0x2:
+ return "i.MX8QXP";
+ case 0xe:
+ return "i.MX8DXL";
+ default:
+ break;
+ }
+
+ return "NULL";
+}
+
+int imx_scu_soc_init(struct device *dev)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ int id, ret;
+ u64 uid = 0;
+ u32 val;
+
+ ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle);
+ if (ret)
+ return ret;
+
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ ret = of_property_read_string(of_root,
+ "model",
+ &soc_dev_attr->machine);
+ if (ret)
+ return ret;
+
+ id = imx_scu_soc_id();
+ if (id < 0)
+ return -EINVAL;
+
+ ret = imx_scu_soc_uid(&uid);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* format soc_id value passed from SCU firmware */
+ val = id & 0x1f;
+ soc_dev_attr->soc_id = imx_scu_soc_name(val);
+
+ /* format revision value passed from SCU firmware */
+ val = (id >> 5) & 0xf;
+ val = (((val >> 2) + 1) << 4) | (val & 0x3);
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (val >> 4) & 0xf, val & 0xf);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL,
+ "%016llX", uid);
+ if (!soc_dev_attr->serial_number)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
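+
+/*
+ * Worked example of the decoding above (inferred from the code, a sketch
+ * rather than an authoritative description of the SCU encoding): the SCU
+ * returns the SoC id in bits [4:0] and a 4-bit revision field in bits
+ * [8:5].  For a raw revision field of 0b0100, the major revision is
+ * (0b01 + 1) = 2 and the minor revision is 0b00, so the reported revision
+ * string is "2.0".
+ */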
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
new file mode 100644
index 0000000000..1dd4362ef9
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * Implementation of the SCU IPC functions using MUs (client side).
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define SCU_MU_CHAN_NUM 8
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
+
+struct imx_sc_chan {
+ struct imx_sc_ipc *sc_ipc;
+
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ int idx;
+ struct completion tx_done;
+};
+
+struct imx_sc_ipc {
+ /* SCU uses 4 Tx and 4 Rx channels */
+ struct imx_sc_chan chans[SCU_MU_CHAN_NUM];
+ struct device *dev;
+ struct mutex lock;
+ struct completion done;
+ bool fast_ipc;
+
+ /* temporarily store the SCU msg */
+ u32 *msg;
+ u8 rx_size;
+ u8 count;
+};
+
+/*
+ * This type is used to indicate error response for most functions.
+ */
+enum imx_sc_error_codes {
+ IMX_SC_ERR_NONE = 0, /* Success */
+ IMX_SC_ERR_VERSION = 1, /* Incompatible API version */
+ IMX_SC_ERR_CONFIG = 2, /* Configuration error */
+ IMX_SC_ERR_PARM = 3, /* Bad parameter */
+ IMX_SC_ERR_NOACCESS = 4, /* Permission error (no access) */
+ IMX_SC_ERR_LOCKED = 5, /* Permission error (locked) */
+ IMX_SC_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ IMX_SC_ERR_NOTFOUND = 7, /* Not found */
+ IMX_SC_ERR_NOPOWER = 8, /* No power */
+ IMX_SC_ERR_IPC = 9, /* Generic IPC error */
+ IMX_SC_ERR_BUSY = 10, /* Resource is currently busy/active */
+ IMX_SC_ERR_FAIL = 11, /* General I/O failure */
+ IMX_SC_ERR_LAST
+};
+
+static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
+ 0, /* IMX_SC_ERR_NONE */
+ -EINVAL, /* IMX_SC_ERR_VERSION */
+ -EINVAL, /* IMX_SC_ERR_CONFIG */
+ -EINVAL, /* IMX_SC_ERR_PARM */
+ -EACCES, /* IMX_SC_ERR_NOACCESS */
+ -EACCES, /* IMX_SC_ERR_LOCKED */
+ -ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
+ -EEXIST, /* IMX_SC_ERR_NOTFOUND */
+ -EPERM, /* IMX_SC_ERR_NOPOWER */
+ -EPIPE, /* IMX_SC_ERR_IPC */
+ -EBUSY, /* IMX_SC_ERR_BUSY */
+ -EIO, /* IMX_SC_ERR_FAIL */
+};
+
+static struct imx_sc_ipc *imx_sc_ipc_handle;
+
+static inline int imx_sc_to_linux_errno(int errno)
+{
+ if (errno >= IMX_SC_ERR_NONE && errno < IMX_SC_ERR_LAST)
+ return imx_sc_linux_errmap[errno];
+ return -EIO;
+}
+
+/*
+ * Get the default handle used by SCU
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ if (!imx_sc_ipc_handle)
+ return -EPROBE_DEFER;
+
+ *ipc = imx_sc_ipc_handle;
+ return 0;
+}
+EXPORT_SYMBOL(imx_scu_get_handle);
+
+/* Callback called when a word of a message is acked, e.g. read by the SCU */
+static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
+
+ complete(&sc_chan->tx_done);
+}
+
+static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+{
+ struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
+ struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
+ struct imx_sc_rpc_msg *hdr;
+ u32 *data = msg;
+ int i;
+
+ if (!sc_ipc->msg) {
+ dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
+ sc_chan->idx, *data);
+ return;
+ }
+
+ if (sc_ipc->fast_ipc) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ sc_ipc->msg[0] = *data++;
+
+ for (i = 1; i < sc_ipc->rx_size; i++)
+ sc_ipc->msg[i] = *data++;
+
+ complete(&sc_ipc->done);
+
+ return;
+ }
+
+ if (sc_chan->idx == 0) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ dev_dbg(sc_ipc->dev, "msg rx size %u\n", sc_ipc->rx_size);
+ if (sc_ipc->rx_size > 4)
+ dev_warn(sc_ipc->dev, "RPC does not support receiving over 4 words: %u\n",
+ sc_ipc->rx_size);
+ }
+
+ sc_ipc->msg[sc_chan->idx] = *data;
+ sc_ipc->count++;
+
+ dev_dbg(sc_ipc->dev, "mu %u msg %u 0x%x\n", sc_chan->idx,
+ sc_ipc->count, *data);
+
+ if ((sc_ipc->rx_size != 0) && (sc_ipc->count == sc_ipc->rx_size))
+ complete(&sc_ipc->done);
+}
+
+static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+{
+ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
+ struct imx_sc_chan *sc_chan;
+ u32 *data = msg;
+ int ret;
+ int size;
+ int i;
+
+ /* Check size */
+ if (hdr.size > IMX_SC_RPC_MAX_MSG)
+ return -EINVAL;
+
+ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
+ hdr.func, hdr.size);
+
+ size = sc_ipc->fast_ipc ? 1 : hdr.size;
+ for (i = 0; i < size; i++) {
+ sc_chan = &sc_ipc->chans[i % 4];
+
+ /*
+ * SCU requires that all message words are written
+ * sequentially, but the Linux MU driver implements multiple
+ * independent channels for each register, so ordering between
+ * different channels must be ensured by the SCU API interface.
+ *
+ * Wait for tx_done before every send to ensure that no
+ * queueing happens at the mailbox channel level.
+ */
+ if (!sc_ipc->fast_ipc) {
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+ }
+
+ ret = mbox_send_message(sc_chan->ch, &data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * RPC command/response
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
+{
+ uint8_t saved_svc, saved_func;
+ struct imx_sc_rpc_msg *hdr;
+ int ret;
+
+ if (WARN_ON(!sc_ipc || !msg))
+ return -EINVAL;
+
+ mutex_lock(&sc_ipc->lock);
+ reinit_completion(&sc_ipc->done);
+
+ if (have_resp) {
+ sc_ipc->msg = msg;
+ saved_svc = ((struct imx_sc_rpc_msg *)msg)->svc;
+ saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
+ }
+ sc_ipc->count = 0;
+ ret = imx_scu_ipc_write(sc_ipc, msg);
+ if (ret < 0) {
+ dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (have_resp) {
+ if (!wait_for_completion_timeout(&sc_ipc->done,
+ MAX_RX_TIMEOUT)) {
+ dev_err(sc_ipc->dev, "RPC send msg timeout\n");
+ mutex_unlock(&sc_ipc->lock);
+ return -ETIMEDOUT;
+ }
+
+ /* response status is stored in hdr->func field */
+ hdr = msg;
+ ret = hdr->func;
+ /*
+ * Some special SCU firmware APIs do NOT have a return value
+ * in hdr->func, but they do have response data. Those special
+ * APIs are defined as void functions in the SCU firmware, so
+ * they should always be treated as successful.
+ */
+ if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
+ (saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
+ saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
+ ret = 0;
+ }
+
+out:
+ sc_ipc->msg = NULL;
+ mutex_unlock(&sc_ipc->lock);
+
+ dev_dbg(sc_ipc->dev, "RPC SVC done\n");
+
+ return imx_sc_to_linux_errno(ret);
+}
+EXPORT_SYMBOL(imx_scu_call_rpc);
+
+static int imx_scu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_sc_ipc *sc_ipc;
+ struct imx_sc_chan *sc_chan;
+ struct mbox_client *cl;
+ char *chan_name;
+ struct of_phandle_args args;
+ int num_channel;
+ int ret;
+ int i;
+
+ sc_ipc = devm_kzalloc(dev, sizeof(*sc_ipc), GFP_KERNEL);
+ if (!sc_ipc)
+ return -ENOMEM;
+
+ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+ "#mbox-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+
+ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ for (i = 0; i < num_channel; i++) {
+ if (i < num_channel / 2)
+ chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
+ else
+ chan_name = kasprintf(GFP_KERNEL, "rx%d",
+ i - num_channel / 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+
+ sc_chan = &sc_ipc->chans[i];
+ cl = &sc_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_scu_rx_callback;
+
+ if (!sc_ipc->fast_ipc) {
+ /* Initialize the tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+ }
+
+ sc_chan->sc_ipc = sc_ipc;
+ sc_chan->idx = i % (num_channel / 2);
+ sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(sc_chan->ch)) {
+ ret = PTR_ERR(sc_chan->ch);
+ dev_err_probe(dev, ret, "Failed to request mbox chan %s\n",
+ chan_name);
+ kfree(chan_name);
+ return ret;
+ }
+
+ dev_dbg(dev, "request mbox chan %s\n", chan_name);
+ /* chan_name is not used anymore by framework */
+ kfree(chan_name);
+ }
+
+ sc_ipc->dev = dev;
+ mutex_init(&sc_ipc->lock);
+ init_completion(&sc_ipc->done);
+
+ imx_sc_ipc_handle = sc_ipc;
+
+ ret = imx_scu_soc_init(dev);
+ if (ret)
+ dev_warn(dev, "failed to initialize SoC info: %d\n", ret);
+
+ ret = imx_scu_enable_general_irq_channel(dev);
+ if (ret)
+ dev_warn(dev,
+ "failed to enable general irq channel: %d\n", ret);
+
+ dev_info(dev, "NXP i.MX SCU Initialized\n");
+
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id imx_scu_match[] = {
+ { .compatible = "fsl,imx-scu", },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver imx_scu_driver = {
+ .driver = {
+ .name = "imx-scu",
+ .of_match_table = imx_scu_match,
+ },
+ .probe = imx_scu_probe,
+};
+
+static int __init imx_scu_driver_init(void)
+{
+ return platform_driver_register(&imx_scu_driver);
+}
+subsys_initcall_sync(imx_scu_driver_init);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
+MODULE_LICENSE("GPL v2");
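+
+/*
+ * Illustrative call flow (a sketch, not part of this driver): a client
+ * builds an RPC message consisting of a struct imx_sc_rpc_msg header
+ * followed by payload words and hands it to imx_scu_call_rpc().  The MISC
+ * "set/get control" messages in misc.c below are concrete examples; the
+ * flow is roughly:
+ *
+ *	imx_scu_get_handle(&ipc);		// -EPROBE_DEFER until probed
+ *	hdr.ver = IMX_SC_RPC_VERSION;
+ *	hdr.svc/func/size = ...;		// service, function, word count
+ *	imx_scu_call_rpc(ipc, &msg, true);	// blocks up to MAX_RX_TIMEOUT
+ *	// on return, the response (if any) has overwritten msg in place
+ */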
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
new file mode 100644
index 0000000000..d073cb3ce6
--- /dev/null
+++ b/drivers/firmware/imx/misc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * File containing client-side RPC functions for the MISC service. These
+ * functions are ported to clients that communicate with the SC.
+ *
+ */
+
+#include <linux/firmware/imx/svc/misc.h>
+
+struct imx_sc_msg_req_misc_set_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 ctrl;
+ u32 val;
+ u16 resource;
+} __packed __aligned(4);
+
+struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+ u32 address_hi;
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+} __packed __aligned(4);
+
+struct imx_sc_msg_req_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 ctrl;
+ u16 resource;
+} __packed __aligned(4);
+
+struct imx_sc_msg_resp_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 val;
+} __packed __aligned(4);
+
+/*
+ * This function sets a miscellaneous control value.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] ctrl control to change
+ * @param[in] val value to apply to the control
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val)
+{
+ struct imx_sc_msg_req_misc_set_ctrl msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+ hdr->func = (uint8_t)IMX_SC_MISC_FUNC_SET_CONTROL;
+ hdr->size = 4;
+
+ msg.ctrl = ctrl;
+ msg.val = val;
+ msg.resource = resource;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_misc_set_control);
+
+/*
+ * This function gets a miscellaneous control value.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] ctrl control to get
+ * @param[out] val pointer to return the control value
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val)
+{
+ struct imx_sc_msg_req_misc_get_ctrl msg;
+ struct imx_sc_msg_resp_misc_get_ctrl *resp;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+ hdr->func = (uint8_t)IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.ctrl = ctrl;
+ msg.resource = resource;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ resp = (struct imx_sc_msg_resp_misc_get_ctrl *)&msg;
+ if (val != NULL)
+ *val = resp->val;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] enable true for start, false for stop
+ * @param[in] phys_addr initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ struct imx_sc_msg_req_cpu_start msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CPU_START;
+ hdr->size = 4;
+
+ msg.address_hi = phys_addr >> 32;
+ msg.address_lo = phys_addr;
+ msg.resource = resource;
+ msg.enable = enable;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
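+
+/*
+ * Illustrative usage sketch (not part of this driver): a client obtains the
+ * global IPC handle (see imx-scu.c) and issues MISC RPCs through it.  The
+ * resource/control values below are placeholders rather than real
+ * IMX_SC_R_* and IMX_SC_C_* constants.
+ */
+#if 0	/* example only, never compiled */
+static int my_set_control_example(void)
+{
+	struct imx_sc_ipc *ipc;
+	int ret;
+
+	ret = imx_scu_get_handle(&ipc);
+	if (ret)
+		return ret;	/* may be -EPROBE_DEFER until imx-scu probes */
+
+	/* set (placeholder) control 0 of (placeholder) resource 0 to 1 */
+	return imx_sc_misc_set_control(ipc, 0, 0, 1);
+}
+#endif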
diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
new file mode 100644
index 0000000000..d492b99e1c
--- /dev/null
+++ b/drivers/firmware/imx/rm.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ *
+ * File containing client-side RPC functions for the RM service. These
+ * functions are ported to clients that communicate with the SC.
+ */
+
+#include <linux/firmware/imx/svc/rm.h>
+
+struct imx_sc_msg_rm_rsrc_owned {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+} __packed __aligned(4);
+
+/*
+ * This function checks whether @resource is owned by the current partition.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ *
+ * @return Returns 0 for not owned and 1 for owned.
+ */
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ struct imx_sc_msg_rm_rsrc_owned msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED;
+ hdr->size = 2;
+
+ msg.resource = resource;
+
+ /*
+ * The SCU firmware only returns 0 or 1 for the resource-owned
+ * check, meaning not owned or owned respectively, so the call
+ * always succeeds.
+ */
+ imx_scu_call_rpc(ipc, &msg, true);
+
+ return hdr->func;
+}
+EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
+
+struct imx_sc_msg_rm_get_resource_owner {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ } req;
+ struct {
+ u8 val;
+ } resp;
+ } data;
+} __packed __aligned(4);
+
+/*
+ * This function gets the partition number that owns @resource.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[out] pt pointer to return the partition number
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ struct imx_sc_msg_rm_get_resource_owner msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER;
+ hdr->size = 2;
+
+ msg.data.req.resource = resource;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ if (pt)
+ *pt = msg.data.resp.val;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_sc_rm_get_resource_owner);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
new file mode 100644
index 0000000000..6e9788324f
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2007-2010 Red Hat, Inc.
+ * by Peter Jones <pjones@redhat.com>
+ * Copyright 2008 IBM, Inc.
+ * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Copyright 2008
+ * by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code exposes the iSCSI Boot Format Table to userland via sysfs.
+ *
+ * Changelog:
+ *
+ * 06 Jan 2010 - Peter Jones <pjones@redhat.com>
+ * New changelog entries are in the git log from now on. Not here.
+ *
+ * 14 Mar 2008 - Konrad Rzeszutek <ketuzsezr@darnok.org>
+ * Updated comments and copyrights. (v0.4.9)
+ *
+ * 11 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Converted to using ibft_addr. (v0.4.8)
+ *
+ * 8 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Combined two functions in one: reserve_ibft_region. (v0.4.7)
+ *
+ * 30 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added logic to handle IPv6 addresses. (v0.4.6)
+ *
+ * 25 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added logic to handle badly not-to-spec iBFT. (v0.4.5)
+ *
+ * 4 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added __init to function declarations. (v0.4.4)
+ *
+ * 21 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Updated kobject registration, combined unregister functions in one
+ * and code and style cleanup. (v0.4.3)
+ *
+ * 5 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added end-markers to enums and re-organized kobject registration. (v0.4.2)
+ *
+ * 4 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Created 'device' sysfs link to the NIC and style cleanup. (v0.4.1)
+ *
+ * 28 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added sysfs-ibft documentation, moved 'find_ibft' function to
+ * in its own file and added text attributes for every struct field. (v0.4)
+ *
+ * 21 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added text attributes emulating OpenFirmware /proc/device-tree naming.
+ * Removed binary /sysfs interface (v0.3)
+ *
+ * 29 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added functionality in setup.c to reserve iBFT region. (v0.2)
+ *
+ * 27 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * First version exposing iBFT data via a binary /sysfs. (v0.1)
+ */
+
+
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iscsi_ibft.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_boot_sysfs.h>
+
+#define IBFT_ISCSI_VERSION "0.5.0"
+#define IBFT_ISCSI_DATE "2010-Feb-25"
+
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com> and "
+ "Konrad Rzeszutek <ketuzsezr@darnok.org>");
+MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBFT_ISCSI_VERSION);
+
+static struct acpi_table_ibft *ibft_addr;
+
+struct ibft_hdr {
+ u8 id;
+ u8 version;
+ u16 length;
+ u8 index;
+ u8 flags;
+} __attribute__((__packed__));
+
+struct ibft_control {
+ struct ibft_hdr hdr;
+ u16 extensions;
+ u16 initiator_off;
+ u16 nic0_off;
+ u16 tgt0_off;
+ u16 nic1_off;
+ u16 tgt1_off;
+ u16 expansion[];
+} __attribute__((__packed__));
+
+struct ibft_initiator {
+ struct ibft_hdr hdr;
+ char isns_server[16];
+ char slp_server[16];
+ char pri_radius_server[16];
+ char sec_radius_server[16];
+ u16 initiator_name_len;
+ u16 initiator_name_off;
+} __attribute__((__packed__));
+
+struct ibft_nic {
+ struct ibft_hdr hdr;
+ char ip_addr[16];
+ u8 subnet_mask_prefix;
+ u8 origin;
+ char gateway[16];
+ char primary_dns[16];
+ char secondary_dns[16];
+ char dhcp[16];
+ u16 vlan;
+ char mac[6];
+ u16 pci_bdf;
+ u16 hostname_len;
+ u16 hostname_off;
+} __attribute__((__packed__));
+
+struct ibft_tgt {
+ struct ibft_hdr hdr;
+ char ip_addr[16];
+ u16 port;
+ char lun[8];
+ u8 chap_type;
+ u8 nic_assoc;
+ u16 tgt_name_len;
+ u16 tgt_name_off;
+ u16 chap_name_len;
+ u16 chap_name_off;
+ u16 chap_secret_len;
+ u16 chap_secret_off;
+ u16 rev_chap_name_len;
+ u16 rev_chap_name_off;
+ u16 rev_chap_secret_len;
+ u16 rev_chap_secret_off;
+} __attribute__((__packed__));
+
+/*
+ * The different kobject types and their names.
+ */
+enum ibft_id {
+ id_reserved = 0, /* We don't support. */
+ id_control = 1, /* Should show up only once and is not exported. */
+ id_initiator = 2,
+ id_nic = 3,
+ id_target = 4,
+ id_extensions = 5, /* We don't support. */
+ id_end_marker,
+};
+
+/*
+ * The kobject and attribute structures.
+ */
+
+struct ibft_kobject {
+ struct acpi_table_ibft *header;
+ union {
+ struct ibft_initiator *initiator;
+ struct ibft_nic *nic;
+ struct ibft_tgt *tgt;
+ struct ibft_hdr *hdr;
+ };
+};
+
+static struct iscsi_boot_kset *boot_kset;
+
+/* fully null address */
+static const char nulls[16];
+
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+ return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
+/*
+ * Helper functions to parse data properly.
+ */
+static ssize_t sprintf_ipaddr(char *buf, u8 *ip)
+{
+ char *str = buf;
+
+ if (ip[0] == 0 && ip[1] == 0 && ip[2] == 0 && ip[3] == 0 &&
+ ip[4] == 0 && ip[5] == 0 && ip[6] == 0 && ip[7] == 0 &&
+ ip[8] == 0 && ip[9] == 0 && ip[10] == 0xff && ip[11] == 0xff) {
+ /*
+ * IPV4
+ */
+ str += sprintf(buf, "%pI4", ip + 12);
+ } else {
+ /*
+ * IPv6
+ */
+ str += sprintf(str, "%pI6", ip);
+ }
+ str += sprintf(str, "\n");
+ return str - buf;
+}
+
+static ssize_t sprintf_string(char *str, int len, char *buf)
+{
+ return sprintf(str, "%.*s\n", len, buf);
+}
+
+/*
+ * Helper function to verify the IBFT header.
+ */
+static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
+{
+ if (hdr->id != id) {
+ printk(KERN_ERR "iBFT error: We expected the %s " \
+ "field header.id to have %d but " \
+ "found %d instead!\n", t, id, hdr->id);
+ return -ENODEV;
+ }
+ if (length && hdr->length != length) {
+ printk(KERN_ERR "iBFT error: We expected the %s " \
+ "field header.length to have %d but " \
+ "found %d instead!\n", t, length, hdr->length);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Routines for parsing the iBFT data to be human readable.
+ */
+static ssize_t ibft_attr_show_initiator(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_initiator *initiator = entry->initiator;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+
+ if (!initiator)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
+ str += sprintf(str, "%d\n", initiator->hdr.index);
+ break;
+ case ISCSI_BOOT_INI_FLAGS:
+ str += sprintf(str, "%d\n", initiator->hdr.flags);
+ break;
+ case ISCSI_BOOT_INI_ISNS_SERVER:
+ str += sprintf_ipaddr(str, initiator->isns_server);
+ break;
+ case ISCSI_BOOT_INI_SLP_SERVER:
+ str += sprintf_ipaddr(str, initiator->slp_server);
+ break;
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+ str += sprintf_ipaddr(str, initiator->pri_radius_server);
+ break;
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+ str += sprintf_ipaddr(str, initiator->sec_radius_server);
+ break;
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ str += sprintf_string(str, initiator->initiator_name_len,
+ (char *)ibft_loc +
+ initiator->initiator_name_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_nic *nic = entry->nic;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+ __be32 val;
+
+ if (!nic)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
+ str += sprintf(str, "%d\n", nic->hdr.index);
+ break;
+ case ISCSI_BOOT_ETH_FLAGS:
+ str += sprintf(str, "%d\n", nic->hdr.flags);
+ break;
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ str += sprintf_ipaddr(str, nic->ip_addr);
+ break;
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ str += sprintf(str, "%pI4", &val);
+ break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ str += sprintf(str, "%d\n", nic->origin);
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ str += sprintf_ipaddr(str, nic->gateway);
+ break;
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
+ str += sprintf_ipaddr(str, nic->primary_dns);
+ break;
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
+ str += sprintf_ipaddr(str, nic->secondary_dns);
+ break;
+ case ISCSI_BOOT_ETH_DHCP:
+ str += sprintf_ipaddr(str, nic->dhcp);
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ str += sprintf(str, "%d\n", nic->vlan);
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ str += sprintf(str, "%pM\n", nic->mac);
+ break;
+ case ISCSI_BOOT_ETH_HOSTNAME:
+ str += sprintf_string(str, nic->hostname_len,
+ (char *)ibft_loc + nic->hostname_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+};
+
+static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_tgt *tgt = entry->tgt;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+ int i;
+
+ if (!tgt)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
+ str += sprintf(str, "%d\n", tgt->hdr.index);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ str += sprintf(str, "%d\n", tgt->hdr.flags);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ str += sprintf_ipaddr(str, tgt->ip_addr);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ str += sprintf(str, "%d\n", tgt->port);
+ break;
+ case ISCSI_BOOT_TGT_LUN:
+ for (i = 0; i < 8; i++)
+ str += sprintf(str, "%x", (u8)tgt->lun[i]);
+ str += sprintf(str, "\n");
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ str += sprintf(str, "%d\n", tgt->nic_assoc);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ str += sprintf(str, "%d\n", tgt->chap_type);
+ break;
+ case ISCSI_BOOT_TGT_NAME:
+ str += sprintf_string(str, tgt->tgt_name_len,
+ (char *)ibft_loc + tgt->tgt_name_off);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ str += sprintf_string(str, tgt->chap_name_len,
+ (char *)ibft_loc + tgt->chap_name_off);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ str += sprintf_string(str, tgt->chap_secret_len,
+ (char *)ibft_loc + tgt->chap_secret_off);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ str += sprintf_string(str, tgt->rev_chap_name_len,
+ (char *)ibft_loc +
+ tgt->rev_chap_name_off);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ str += sprintf_string(str, tgt->rev_chap_secret_len,
+ (char *)ibft_loc +
+ tgt->rev_chap_secret_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ char *str = buf;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ str += sprintf_string(str, ACPI_NAMESEG_SIZE,
+ entry->header->header.signature);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ str += sprintf_string(str, ACPI_OEM_ID_SIZE,
+ entry->header->header.oem_id);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
+ entry->header->header.oem_table_id);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static int __init ibft_check_device(void)
+{
+ int len;
+ u8 *pos;
+ u8 csum = 0;
+
+ len = ibft_addr->header.length;
+
+ /* Sanity checking of iBFT. */
+ if (ibft_addr->header.revision != 1) {
+ printk(KERN_ERR "iBFT module supports only revision 1, " \
+ "while this is %d.\n",
+ ibft_addr->header.revision);
+ return -ENOENT;
+ }
+ for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++)
+ csum += *pos;
+
+ if (csum) {
+ printk(KERN_ERR "iBFT has incorrect checksum (0x%x)!\n", csum);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/*
+ * Helper routines to determine whether the entry is valid
+ * in the proper iBFT structure.
+ */
+static umode_t ibft_check_nic_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_nic *nic = entry->nic;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ if (address_not_null(nic->ip_addr))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ if (nic->subnet_mask_prefix)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ if (address_not_null(nic->gateway))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
+ if (address_not_null(nic->primary_dns))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
+ if (address_not_null(nic->secondary_dns))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_DHCP:
+ if (address_not_null(nic->dhcp))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ case ISCSI_BOOT_ETH_MAC:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_HOSTNAME:
+ if (nic->hostname_off)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_tgt_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_tgt *tgt = entry->tgt;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
+ case ISCSI_BOOT_TGT_FLAGS:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_LUN:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_NAME:
+ if (tgt->tgt_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ if (tgt->chap_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ if (tgt->rev_chap_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_initiator_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_initiator *init = entry->initiator;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
+ case ISCSI_BOOT_INI_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_ISNS_SERVER:
+ if (address_not_null(init->isns_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SLP_SERVER:
+ if (address_not_null(init->slp_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+ if (address_not_null(init->pri_radius_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+ if (address_not_null(init->sec_radius_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ if (init->initiator_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_acpitbl_for(void *data, int type)
+{
+
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static void ibft_kobj_release(void *data)
+{
+ kfree(data);
+}
+
+/*
+ * Helper function for ibft_register_kobjects.
+ */
+static int __init ibft_create_kobject(struct acpi_table_ibft *header,
+ struct ibft_hdr *hdr)
+{
+ struct iscsi_boot_kobj *boot_kobj = NULL;
+ struct ibft_kobject *ibft_kobj = NULL;
+ struct ibft_nic *nic = (struct ibft_nic *)hdr;
+ struct pci_dev *pci_dev;
+ int rc = 0;
+
+ ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+ if (!ibft_kobj)
+ return -ENOMEM;
+
+ ibft_kobj->header = header;
+ ibft_kobj->hdr = hdr;
+
+ switch (hdr->id) {
+ case id_initiator:
+ rc = ibft_verify_hdr("initiator", hdr, id_initiator,
+ sizeof(*ibft_kobj->initiator));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_initiator,
+ ibft_check_initiator_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_nic:
+ rc = ibft_verify_hdr("ethernet", hdr, id_nic,
+ sizeof(*ibft_kobj->nic));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_nic,
+ ibft_check_nic_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_target:
+ rc = ibft_verify_hdr("target", hdr, id_target,
+ sizeof(*ibft_kobj->tgt));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_target,
+ ibft_check_tgt_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_reserved:
+ case id_control:
+ case id_extensions:
+ /* Fields which we don't support. Ignore them */
+ rc = 1;
+ break;
+ default:
+ printk(KERN_ERR "iBFT has unknown structure type (%d). " \
+ "Report this bug to %.6s!\n", hdr->id,
+ header->header.oem_id);
+ rc = 1;
+ break;
+ }
+
+ if (rc) {
+ /* Skip adding this kobject, but exit with non-fatal error. */
+ rc = 0;
+ goto free_ibft_obj;
+ }
+
+ if (hdr->id == id_nic) {
+ /*
+ * We don't search for the device in other domains than
+ * zero. This is because on x86 platforms the BIOS
+ * executes only devices which are in domain 0. Furthermore, the
+ * iBFT spec doesn't have a domain id field :-(
+ */
+ pci_dev = pci_get_domain_bus_and_slot(0,
+ (nic->pci_bdf & 0xff00) >> 8,
+ (nic->pci_bdf & 0xff));
+ if (pci_dev) {
+ rc = sysfs_create_link(&boot_kobj->kobj,
+ &pci_dev->dev.kobj, "device");
+ pci_dev_put(pci_dev);
+ }
+ }
+ return 0;
+
+free_ibft_obj:
+ kfree(ibft_kobj);
+ return rc;
+}
+
+/*
+ * Scan the iBFT table structure for the NIC and Target fields. When
+ * found, register them as boot kobjects. We do not support the other
+ * fields at this point, so they are skipped.
+ */
+static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
+{
+ struct ibft_control *control = NULL;
+ struct iscsi_boot_kobj *boot_kobj;
+ struct ibft_kobject *ibft_kobj;
+ void *ptr, *end;
+ int rc = 0;
+ u16 offset;
+ u16 eot_offset;
+
+ control = (void *)header + sizeof(*header);
+ end = (void *)control + control->hdr.length;
+ eot_offset = (void *)header + header->header.length - (void *)control;
+ rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
+
+ /* iBFT table safety checking */
+ rc |= ((control->hdr.index) ? -ENODEV : 0);
+ rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
+ if (rc) {
+ printk(KERN_ERR "iBFT error: Control header is invalid!\n");
+ return rc;
+ }
+ for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
+ offset = *(u16 *)ptr;
+ if (offset && offset < header->header.length &&
+ offset < eot_offset) {
+ rc = ibft_create_kobject(header,
+ (void *)header + offset);
+ if (rc)
+ break;
+ }
+ }
+ if (rc)
+ return rc;
+
+ ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+ if (!ibft_kobj)
+ return -ENOMEM;
+
+ ibft_kobj->header = header;
+ ibft_kobj->hdr = NULL; /*for ibft_unregister*/
+
+ boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
+ ibft_kobj,
+ ibft_attr_show_acpitbl,
+ ibft_check_acpitbl_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ kfree(ibft_kobj);
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+static void ibft_unregister(void)
+{
+ struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+ struct ibft_kobject *ibft_kobj;
+
+ list_for_each_entry_safe(boot_kobj, tmp_kobj,
+ &boot_kset->kobj_list, list) {
+ ibft_kobj = boot_kobj->data;
+ if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
+ sysfs_remove_link(&boot_kobj->kobj, "device");
+ };
+}
+
+static void ibft_cleanup(void)
+{
+ if (boot_kset) {
+ ibft_unregister();
+ iscsi_boot_destroy_kset(boot_kset);
+ }
+}
+
+static void __exit ibft_exit(void)
+{
+ ibft_cleanup();
+}
+
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+ { "BIFT" }, /* Broadcom iSCSI Offload */
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+#ifdef CONFIG_ISCSI_IBFT_FIND
+static int __init acpi_find_isa_region(void)
+{
+ if (ibft_phys_addr) {
+ ibft_addr = isa_bus_to_virt(ibft_phys_addr);
+ return 0;
+ }
+ return -ENODEV;
+}
+#else
+static int __init acpi_find_isa_region(void)
+{
+ return -ENODEV;
+}
+#endif
+/*
+ * ibft_init() - creates sysfs tree entries for the iBFT data.
+ */
+static int __init ibft_init(void)
+{
+ int rc = 0;
+
+ /*
+  * On UEFI systems, setup_arch()/reserve_ibft_region() runs before the
+  * ACPI tables are parsed and only does the legacy scan, so fall back
+  * to the ACPI lookup here if that scan found nothing.
+  */
+ if (acpi_find_isa_region())
+ acpi_find_ibft_region();
+
+ if (ibft_addr) {
+ pr_info("iBFT detected.\n");
+
+ rc = ibft_check_device();
+ if (rc)
+ return rc;
+
+ boot_kset = iscsi_boot_create_kset("ibft");
+ if (!boot_kset)
+ return -ENOMEM;
+
+ /* Scan the IBFT for data and register the kobjects. */
+ rc = ibft_register_kobjects(ibft_addr);
+ if (rc)
+ goto out_free;
+ } else
+ printk(KERN_INFO "No iBFT detected.\n");
+
+ return 0;
+
+out_free:
+ ibft_cleanup();
+ return rc;
+}
+
+module_init(ibft_init);
+module_exit(ibft_exit);
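+
+/*
+ * Resulting sysfs layout (a sketch for a typical iBFT describing one NIC
+ * and one target; the exact entries depend on which structures the
+ * firmware exposes):
+ *
+ *	/sys/firmware/ibft/initiator	- initiator attributes
+ *	/sys/firmware/ibft/ethernet0	- per-NIC attributes (IP, MAC, ...)
+ *	/sys/firmware/ibft/target0	- per-target attributes (IP, CHAP, ...)
+ *
+ * plus an entry for the ACPI table header (signature, OEM id, OEM table id).
+ */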
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
new file mode 100644
index 0000000000..71f51303c2
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2007-2010 Red Hat, Inc.
+ * by Peter Jones <pjones@redhat.com>
+ * Copyright 2007 IBM, Inc.
+ * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Copyright 2008
+ * by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code finds the iSCSI Boot Format Table.
+ */
+
+#include <linux/memblock.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_ibft.h>
+
+#include <asm/mmzone.h>
+
+/*
+ * Physical location of iSCSI Boot Format Table.
+ */
+phys_addr_t ibft_phys_addr;
+EXPORT_SYMBOL_GPL(ibft_phys_addr);
+
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ { "iBFT" },
+ { "BIFT" }, /* Broadcom iSCSI Offload */
+};
+
+#define IBFT_SIGN_LEN 4
+#define VGA_MEM 0xA0000 /* VGA buffer */
+#define VGA_SIZE 0x20000 /* 128kB */
+
+/*
+ * Routine used to find and reserve the iSCSI Boot Format Table
+ */
+void __init reserve_ibft_region(void)
+{
+ unsigned long pos, virt_pos = 0;
+ unsigned int len = 0;
+ void *virt = NULL;
+ int i;
+
+ ibft_phys_addr = 0;
+
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this
+ */
+ if (efi_enabled(EFI_BOOT))
+ return;
+
+ for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
+ /* The table can't be inside the VGA BIOS reserved space,
+ * so skip that area */
+ if (pos == VGA_MEM)
+ pos += VGA_SIZE;
+
+ /* Map page by page */
+ if (offset_in_page(pos) == 0) {
+ if (virt)
+ early_memunmap(virt, PAGE_SIZE);
+ virt = early_memremap_ro(pos, PAGE_SIZE);
+ virt_pos = pos;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) {
+ if (memcmp(virt + (pos - virt_pos), ibft_signs[i].sign,
+ IBFT_SIGN_LEN) == 0) {
+ unsigned long *addr =
+ (unsigned long *)(virt + pos - virt_pos + 4);
+ len = *addr;
+ /* if the length of the table extends past 1M,
+ * the table cannot be valid. */
+ if (pos + len <= (IBFT_END-1)) {
+ ibft_phys_addr = pos;
+ memblock_reserve(ibft_phys_addr, PAGE_ALIGN(len));
+ pr_info("iBFT found at %pa.\n", &ibft_phys_addr);
+ goto out;
+ }
+ }
+ }
+ }
+
+out:
+ early_memunmap(virt, PAGE_SIZE);
+}
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
new file mode 100644
index 0000000000..8e59be3782
--- /dev/null
+++ b/drivers/firmware/memmap.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/firmware/memmap.c
+ * Copyright (C) 2008 SUSE LINUX Products GmbH
+ * by Bernhard Walle <bernhard.walle@gmx.de>
+ */
+
+#include <linux/string.h>
+#include <linux/firmware-map.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+/*
+ * Data types ------------------------------------------------------------------
+ */
+
+/*
+ * Firmware map entry. Because firmware memory maps are flat and not
+ * hierarchical, it's ok to organise them in a linked list. No parent
+ * information is necessary as for the resource tree.
+ */
+struct firmware_map_entry {
+ /*
+ * start and end must be u64 rather than resource_size_t, because e820
+ * resources can lie at addresses above 4G.
+ */
+ u64 start; /* start of the memory range */
+ u64 end; /* end of the memory range (incl.) */
+ const char *type; /* type of the memory range */
+ struct list_head list; /* entry for the linked list */
+ struct kobject kobj; /* kobject for each entry */
+};
+
+/*
+ * Forward declarations --------------------------------------------------------
+ */
+static ssize_t memmap_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
+/*
+ * Static data -----------------------------------------------------------------
+ */
+
+struct memmap_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
+};
+
+static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
+static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
+static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+ &memmap_start_attr.attr,
+ &memmap_end_attr.attr,
+ &memmap_type_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(def);
+
+static const struct sysfs_ops memmap_attr_ops = {
+ .show = memmap_attr_show,
+};
+
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by boot mem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+ if (PageReserved(virt_to_page(entry))) {
+ /*
+ * Remember the storage allocated by bootmem, and reuse it when
+ * the memory is hot-added again. The entry will be added to
+ * map_entries_bootmem here, and deleted from &map_entries in
+ * firmware_map_remove_entry().
+ */
+ spin_lock(&map_entries_bootmem_lock);
+ list_add(&entry->list, &map_entries_bootmem);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ return;
+ }
+
+ kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+ .release = release_firmware_map_entry,
+ .sysfs_ops = &memmap_attr_ops,
+ .default_groups = def_groups,
+};
+
+/*
+ * Registration functions ------------------------------------------------------
+ */
+
+/**
+ * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
+ * entry.
+ *
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ *
+ * Return: 0 always
+ */
+static int firmware_map_add_entry(u64 start, u64 end,
+ const char *type,
+ struct firmware_map_entry *entry)
+{
+ BUG_ON(start > end);
+
+ entry->start = start;
+ entry->end = end - 1;
+ entry->type = type;
+ INIT_LIST_HEAD(&entry->list);
+ kobject_init(&entry->kobj, &memmap_ktype);
+
+ spin_lock(&map_entries_lock);
+ list_add_tail(&entry->list, &map_entries);
+ spin_unlock(&map_entries_lock);
+
+ return 0;
+}
+
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ */
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+ list_del(&entry->list);
+}
+
+/*
+ * Add memmap entry on sysfs
+ */
+static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ static int map_entries_nr;
+ static struct kset *mmap_kset;
+
+ if (entry->kobj.state_in_sysfs)
+ return -EEXIST;
+
+ if (!mmap_kset) {
+ mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
+ if (!mmap_kset)
+ return -ENOMEM;
+ }
+
+ entry->kobj.kset = mmap_kset;
+ if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
+ kobject_put(&entry->kobj);
+
+ return 0;
+}
+
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ kobject_put(&entry->kobj);
+}
+
+/**
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: In which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+ struct list_head *list)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, list, list)
+ if ((entry->start == start) && (entry->end == end) &&
+ (!strcmp(entry->type, type))) {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/**
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry() except that it finds
+ * the given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type,
+ &map_entries_bootmem);
+}
+
+/**
+ * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
+ * memory hotplug.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive)
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function is for memory hotplug and is
+ * similar to firmware_map_add_early(). The only difference is that it
+ * creates the sysfs entry dynamically.
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (entry)
+ return 0;
+
+ entry = firmware_map_find_entry_bootmem(start, end - 1, type);
+ if (!entry) {
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ } else {
+ /* Reuse storage allocated by bootmem. */
+ spin_lock(&map_entries_bootmem_lock);
+ list_del(&entry->list);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ memset(entry, 0, sizeof(*entry));
+ }
+
+ firmware_map_add_entry(start, end, type, entry);
+ /* create the memmap entry */
+ add_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
+
+/**
+ * firmware_map_add_early() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function uses the bootmem allocator
+ * for memory allocation.
+ *
+ * This function must be called before late_initcall.
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __init firmware_map_add_early(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ entry = memblock_alloc(sizeof(struct firmware_map_entry),
+ SMP_CACHE_BYTES);
+ if (WARN_ON(!entry))
+ return -ENOMEM;
+
+ return firmware_map_add_entry(start, end, type, entry);
+}
+
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * removes a firmware mapping entry.
+ *
+ * Return: 0 on success, or -EINVAL if no entry.
+ */
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ spin_lock(&map_entries_lock);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (!entry) {
+ spin_unlock(&map_entries_lock);
+ return -EINVAL;
+ }
+
+ firmware_map_remove_entry(entry);
+ spin_unlock(&map_entries_lock);
+
+ /* remove the memmap entry */
+ remove_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
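+
+/*
+ * Illustrative usage sketch (not taken from this file): a memory hot-add /
+ * hot-remove path would typically pair the two calls above roughly like
+ * this, assuming start/size describe the hotplugged block and "System RAM"
+ * is the type string used for RAM ranges:
+ *
+ *    firmware_map_add_hotplug(start, start + size, "System RAM");
+ *    ...
+ *    firmware_map_remove(start, start + size, "System RAM");
+ */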
+
+/*
+ * Sysfs functions -------------------------------------------------------------
+ */
+
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->start);
+}
+
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->end);
+}
+
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
+}
+
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+ return container_of(attr, struct memmap_attribute, attr);
+}
+
+static ssize_t memmap_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+ struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
+
+ return memmap_attr->show(entry, buf);
+}
+
+/*
+ * Adds the entries in the map_entries list to sysfs. It is important that
+ * firmware_map_add() and firmware_map_add_early() are called before
+ * late_initcall: this function itself runs as a late_initcall(), so any
+ * entry added afterwards is not added to sysfs.
+ */
+static int __init firmware_memmap_init(void)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, &map_entries, list)
+ add_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
+late_initcall(firmware_memmap_init);
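+
+/*
+ * For reference, the resulting layout under /sys/firmware/memmap looks
+ * roughly as follows (entry numbers and values are purely illustrative):
+ *
+ *    /sys/firmware/memmap/0/start   -> 0x0
+ *    /sys/firmware/memmap/0/end     -> 0x9ffff
+ *    /sys/firmware/memmap/0/type    -> System RAM
+ */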
+
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 0000000000..f2fdd37566
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+ tristate "Amlogic Secure Monitor driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
+ depends on ARM64_4K_PAGES
+ help
+ Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 0000000000..c6c09483b6
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MESON_SM) += meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 0000000000..9a2656d736
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+ unsigned int index;
+ u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+ unsigned int shmem_size;
+ u32 cmd_shmem_in_base;
+ u32 cmd_shmem_out_base;
+ struct meson_sm_cmd cmd[];
+};
+
+static const struct meson_sm_chip gxbb_chip = {
+ .shmem_size = SZ_4K,
+ .cmd_shmem_in_base = 0x82000020,
+ .cmd_shmem_out_base = 0x82000021,
+ .cmd = {
+ CMD(SM_EFUSE_READ, 0x82000030),
+ CMD(SM_EFUSE_WRITE, 0x82000031),
+ CMD(SM_EFUSE_USER_MAX, 0x82000033),
+ CMD(SM_GET_CHIP_ID, 0x82000044),
+ CMD(SM_A1_PWRC_SET, 0x82000093),
+ CMD(SM_A1_PWRC_GET, 0x82000095),
+ { /* sentinel */ },
+ },
+};
+
+struct meson_sm_firmware {
+ const struct meson_sm_chip *chip;
+ void __iomem *sm_shmem_in_base;
+ void __iomem *sm_shmem_out_base;
+};
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+ unsigned int cmd_index)
+{
+ const struct meson_sm_cmd *cmd = chip->cmd;
+
+ while (cmd->smc_id && cmd->index != cmd_index)
+ cmd++;
+
+ return cmd->smc_id;
+}
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+ return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+ u32 sm_phy_base;
+
+ sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+ if (!sm_phy_base)
+ return NULL;
+
+ return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @cmd_index: Index of the SMC32 function ID
+ * @ret: Returned value
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 cmd, lret;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ cmd = meson_sm_get_cmd(fw->chip, cmd_index);
+ if (!cmd)
+ return -EINVAL;
+
+ lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+ if (ret)
+ *ret = lret;
+
+ return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @buffer: Buffer to store the retrieved data
+ * @bsize: Size of the buffer
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of read data on success, a negative value on error
+ * When 0 is returned there is no guarantee about the amount of
+ * data read; in that case bsize bytes are copied into buffer.
+ */
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 size;
+ int ret;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ if (!fw->chip->cmd_shmem_out_base)
+ return -EINVAL;
+
+ if (bsize > fw->chip->shmem_size)
+ return -EINVAL;
+
+ if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (size > bsize)
+ return -EINVAL;
+
+ ret = size;
+
+ if (!size)
+ size = bsize;
+
+ if (buffer)
+ memcpy(buffer, fw->sm_shmem_out_base, size);
+
+ return ret;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
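+
+/*
+ * Minimal usage sketch (illustrative only; it mirrors serial_show() further
+ * down in this file, which reads the chip ID through this helper):
+ *
+ *    u8 id[SM_CHIP_ID_LENGTH];
+ *    int len;
+ *
+ *    len = meson_sm_call_read(fw, id, sizeof(id), SM_GET_CHIP_ID,
+ *                             0, 0, 0, 0, 0);
+ *    if (len < 0)
+ *        return len;
+ */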
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @buffer: Buffer containing data to send
+ * @size: Size of the data to send
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 written;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ if (size > fw->chip->shmem_size)
+ return -EINVAL;
+
+ if (!fw->chip->cmd_shmem_in_base)
+ return -EINVAL;
+
+ memcpy(fw->sm_shmem_in_base, buffer, size);
+
+ if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!written)
+ return -EINVAL;
+
+ return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
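+
+/*
+ * Usage sketch for the write path (hedged: the SM_EFUSE_WRITE argument
+ * layout shown here is how eFuse consumers of this API are expected to use
+ * it, not something defined in this file):
+ *
+ *    ret = meson_sm_call_write(fw, buf, len, SM_EFUSE_WRITE,
+ *                              offset, len, 0, 0, 0);
+ *    if (ret < 0)
+ *        return ret;
+ */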
+
+/**
+ * meson_sm_get - get pointer to meson_sm_firmware structure.
+ *
+ * @sm_node: Pointer to the secure-monitor Device Tree node.
+ *
+ * Return: NULL if the secure-monitor device is not ready.
+ */
+struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
+{
+ struct platform_device *pdev = of_find_device_by_node(sm_node);
+
+ if (!pdev)
+ return NULL;
+
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(meson_sm_get);
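+
+/*
+ * A consumer would typically look the firmware up through a device-tree
+ * phandle. A hedged sketch (the "secure-monitor" property name follows the
+ * existing Amlogic consumers and is not defined in this file):
+ *
+ *    struct device_node *sm_np;
+ *    struct meson_sm_firmware *fw;
+ *
+ *    sm_np = of_parse_phandle(dev->of_node, "secure-monitor", 0);
+ *    if (!sm_np)
+ *        return -ENODEV;
+ *    fw = meson_sm_get(sm_np);
+ *    of_node_put(sm_np);
+ *    if (!fw)
+ *        return -EPROBE_DEFER;
+ */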
+
+#define SM_CHIP_ID_LENGTH 119
+#define SM_CHIP_ID_OFFSET 4
+#define SM_CHIP_ID_SIZE 12
+
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct meson_sm_firmware *fw;
+ uint8_t *id_buf;
+ int ret;
+
+ fw = platform_get_drvdata(pdev);
+
+ id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
+ if (!id_buf)
+ return -ENOMEM;
+
+ ret = meson_sm_call_read(fw, id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
+ 0, 0, 0, 0, 0);
+ if (ret < 0) {
+ kfree(id_buf);
+ return ret;
+ }
+
+ ret = sprintf(buf, "%12phN\n", &id_buf[SM_CHIP_ID_OFFSET]);
+
+ kfree(id_buf);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(serial);
+
+static struct attribute *meson_sm_sysfs_attributes[] = {
+ &dev_attr_serial.attr,
+ NULL,
+};
+
+static const struct attribute_group meson_sm_sysfs_attr_group = {
+ .attrs = meson_sm_sysfs_attributes,
+};
+
+static const struct of_device_id meson_sm_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+ { /* sentinel */ },
+};
+
+static int __init meson_sm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct meson_sm_chip *chip;
+ struct meson_sm_firmware *fw;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ chip = of_match_device(meson_sm_ids, dev)->data;
+ if (!chip)
+ return -EINVAL;
+
+ if (chip->cmd_shmem_in_base) {
+ fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_in_base))
+ goto out;
+ }
+
+ if (chip->cmd_shmem_out_base) {
+ fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_out_base))
+ goto out_in_base;
+ }
+
+ fw->chip = chip;
+
+ platform_set_drvdata(pdev, fw);
+
+ if (devm_of_platform_populate(dev))
+ goto out_in_base;
+
+ if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
+ goto out_in_base;
+
+ pr_info("secure-monitor enabled\n");
+
+ return 0;
+
+out_in_base:
+ iounmap(fw->sm_shmem_in_base);
+out:
+ return -EINVAL;
+}
+
+static struct platform_driver meson_sm_driver = {
+ .driver = {
+ .name = "meson-sm",
+ .of_match_table = of_match_ptr(meson_sm_ids),
+ },
+};
+module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
new file mode 100644
index 0000000000..85e94ddc72
--- /dev/null
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 MediaTek Corporation. All rights reserved.
+ * Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+ */
+
+#include <linux/firmware/mediatek/mtk-adsp-ipc.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
+
+/*
+ * mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
+ *
+ * @ipc: ADSP IPC handle
+ * @idx: index of the mailbox channel
+ * @msg: IPC cmd (reply or request)
+ *
+ * Return: zero on success (as returned by mbox_send_message), or a
+ * negative value on error
+ */
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t msg)
+{
+ struct mtk_adsp_chan *adsp_chan;
+ int ret;
+
+ if (idx >= MTK_ADSP_MBOX_NUM)
+ return -EINVAL;
+
+ adsp_chan = &ipc->chans[idx];
+ ret = mbox_send_message(adsp_chan->ch, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_adsp_ipc_send);
+
+/*
+ * mtk_adsp_ipc_recv - recv callback used by MTK ADSP mailbox
+ *
+ * @c: mbox client
+ * @msg: message received
+ *
+ * Users of ADSP IPC will need to provide handle_reply and handle_request
+ * callbacks.
+ */
+static void mtk_adsp_ipc_recv(struct mbox_client *c, void *msg)
+{
+ struct mtk_adsp_chan *chan = container_of(c, struct mtk_adsp_chan, cl);
+ struct device *dev = c->dev;
+
+ switch (chan->idx) {
+ case MTK_ADSP_MBOX_REPLY:
+ chan->ipc->ops->handle_reply(chan->ipc);
+ break;
+ case MTK_ADSP_MBOX_REQUEST:
+ chan->ipc->ops->handle_request(chan->ipc);
+ break;
+ default:
+ dev_err(dev, "wrong mbox chan %d\n", chan->idx);
+ break;
+ }
+}
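+
+/*
+ * Hedged sketch of how a client of this driver wires the callbacks in (the
+ * mtk_adsp_ipc_ops layout is assumed from the header included above; names
+ * prefixed my_* are hypothetical):
+ *
+ *    static void my_handle_reply(struct mtk_adsp_ipc *ipc) { ... }
+ *    static void my_handle_request(struct mtk_adsp_ipc *ipc) { ... }
+ *
+ *    static struct mtk_adsp_ipc_ops my_ops = {
+ *        .handle_reply   = my_handle_reply,
+ *        .handle_request = my_handle_request,
+ *    };
+ *
+ *    ipc->ops = &my_ops;
+ *    mtk_adsp_ipc_send(ipc, MTK_ADSP_MBOX_REQUEST, msg);
+ */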
+
+static int mtk_adsp_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_ipc *adsp_ipc;
+ struct mtk_adsp_chan *adsp_chan;
+ struct mbox_client *cl;
+ int ret;
+ int i, j;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ adsp_ipc = devm_kzalloc(dev, sizeof(*adsp_ipc), GFP_KERNEL);
+ if (!adsp_ipc)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
+ adsp_chan = &adsp_ipc->chans[i];
+ cl = &adsp_chan->cl;
+ cl->dev = dev->parent;
+ cl->tx_block = false;
+ cl->knows_txdone = false;
+ cl->tx_prepare = NULL;
+ cl->rx_callback = mtk_adsp_ipc_recv;
+
+ adsp_chan->ipc = adsp_ipc;
+ adsp_chan->idx = i;
+ adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
+ if (IS_ERR(adsp_chan->ch)) {
+ ret = PTR_ERR(adsp_chan->ch);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ adsp_mbox_ch_names[i], ret);
+
+ for (j = 0; j < i; j++) {
+ adsp_chan = &adsp_ipc->chans[j];
+ mbox_free_channel(adsp_chan->ch);
+ }
+
+ return ret;
+ }
+ }
+
+ adsp_ipc->dev = dev;
+ dev_set_drvdata(dev, adsp_ipc);
+ dev_dbg(dev, "MTK ADSP IPC initialized\n");
+
+ return 0;
+}
+
+static int mtk_adsp_ipc_remove(struct platform_device *pdev)
+{
+ struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev);
+ struct mtk_adsp_chan *adsp_chan;
+ int i;
+
+ for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
+ adsp_chan = &adsp_ipc->chans[i];
+ mbox_free_channel(adsp_chan->ch);
+ }
+
+ return 0;
+}
+
+static struct platform_driver mtk_adsp_ipc_driver = {
+ .driver = {
+ .name = "mtk-adsp-ipc",
+ },
+ .probe = mtk_adsp_ipc_probe,
+ .remove = mtk_adsp_ipc_remove,
+};
+builtin_platform_driver(mtk_adsp_ipc_driver);
+
+MODULE_AUTHOR("Allen-KH Cheng <allen-kh.cheng@mediatek.com>");
+MODULE_DESCRIPTION("MTK ADSP IPC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
new file mode 100644
index 0000000000..715a45442d
--- /dev/null
+++ b/drivers/firmware/pcdp.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Parse the EFI PCDP table to locate the console device.
+ *
+ * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <asm/vga.h>
+#include "pcdp.h"
+
+static int __init
+setup_serial_console(struct pcdp_uart *uart)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ int mmio;
+ static char options[64], *p = options;
+ char parity;
+
+ mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
+ p += sprintf(p, "uart8250,%s,0x%llx",
+ mmio ? "mmio" : "io", uart->addr.address);
+ if (uart->baud) {
+ p += sprintf(p, ",%llu", uart->baud);
+ if (uart->bits) {
+ switch (uart->parity) {
+ case 0x2: parity = 'e'; break;
+ case 0x3: parity = 'o'; break;
+ default: parity = 'n';
+ }
+ p += sprintf(p, "%c%d", parity, uart->bits);
+ }
+ }
+
+ add_preferred_console("uart", 8250, &options[9]);
+ return setup_earlycon(options);
+#else
+ return -ENODEV;
+#endif
+}
+
+static int __init
+setup_vga_console(struct pcdp_device *dev)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+ u8 *if_ptr;
+
+ if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
+ if (if_ptr[0] == PCDP_IF_PCI) {
+ struct pcdp_if_pci if_pci;
+
+ /* struct copy since if_ptr might not be correctly aligned */
+
+ memcpy(&if_pci, if_ptr, sizeof(if_pci));
+
+ if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
+ vga_console_iobase = if_pci.ioport_tra;
+
+ if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
+ vga_console_membase = if_pci.mmio_tra;
+ }
+
+ if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
+ printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
+ return -ENODEV;
+ }
+
+ conswitchp = &vga_con;
+ printk(KERN_INFO "PCDP: VGA console\n");
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+extern unsigned long hcdp_phys;
+
+int __init
+efi_setup_pcdp_console(char *cmdline)
+{
+ struct pcdp *pcdp;
+ struct pcdp_uart *uart;
+ struct pcdp_device *dev, *end;
+ int i, serial = 0;
+ int rc = -ENODEV;
+
+ if (hcdp_phys == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ pcdp = early_memremap(hcdp_phys, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys);
+
+ if (strstr(cmdline, "console=hcdp")) {
+ if (pcdp->rev < 3)
+ serial = 1;
+ } else if (strstr(cmdline, "console=")) {
+ printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
+ goto out;
+ }
+
+ if (pcdp->rev < 3 && efi_uart_console_only())
+ serial = 1;
+
+ for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
+ if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
+ if (uart->type == PCDP_CONSOLE_UART) {
+ rc = setup_serial_console(uart);
+ goto out;
+ }
+ }
+ }
+
+ end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
+ for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
+ dev < end;
+ dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
+ if (dev->flags & PCDP_PRIMARY_CONSOLE) {
+ if (dev->type == PCDP_CONSOLE_VGA) {
+ rc = setup_vga_console(dev);
+ goto out;
+ }
+ }
+ }
+
+out:
+ early_memunmap(pcdp, 4096);
+ return rc;
+}
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
new file mode 100644
index 0000000000..e02540571c
--- /dev/null
+++ b/drivers/firmware/pcdp.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for PCDP-defined console devices
+ *
+ * For DIG64_HCDPv10a_01.pdf and DIG64_PCDPv20.pdf (v1.0a and v2.0 resp.),
+ * please see <http://www.dig64.org/specifications/>
+ *
+ * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#define PCDP_CONSOLE 0
+#define PCDP_DEBUG 1
+#define PCDP_CONSOLE_OUTPUT 2
+#define PCDP_CONSOLE_INPUT 3
+
+#define PCDP_UART (0 << 3)
+#define PCDP_VGA (1 << 3)
+#define PCDP_USB (2 << 3)
+
+/* pcdp_uart.type and pcdp_device.type */
+#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE)
+#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG)
+#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT)
+#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT)
+
+/* pcdp_uart.flags */
+#define PCDP_UART_EDGE_SENSITIVE (1 << 0)
+#define PCDP_UART_ACTIVE_LOW (1 << 1)
+#define PCDP_UART_PRIMARY_CONSOLE (1 << 2)
+#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */
+#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */
+
+struct pcdp_uart {
+ u8 type;
+ u8 bits;
+ u8 parity;
+ u8 stop_bits;
+ u8 pci_seg;
+ u8 pci_bus;
+ u8 pci_dev;
+ u8 pci_func;
+ u64 baud;
+ struct acpi_generic_address addr;
+ u16 pci_dev_id;
+ u16 pci_vendor_id;
+ u32 gsi;
+ u32 clock_rate;
+ u8 pci_prog_intfc;
+ u8 flags;
+ u16 conout_index;
+ u32 reserved;
+} __attribute__((packed));
+
+#define PCDP_IF_PCI 1
+
+/* pcdp_if_pci.trans */
+#define PCDP_PCI_TRANS_IOPORT 0x02
+#define PCDP_PCI_TRANS_MMIO 0x01
+
+struct pcdp_if_pci {
+ u8 interconnect;
+ u8 reserved;
+ u16 length;
+ u8 segment;
+ u8 bus;
+ u8 dev;
+ u8 fun;
+ u16 dev_id;
+ u16 vendor_id;
+ u32 acpi_interrupt;
+ u64 mmio_tra;
+ u64 ioport_tra;
+ u8 flags;
+ u8 trans;
+} __attribute__((packed));
+
+struct pcdp_vga {
+ u8 count; /* address space descriptors */
+} __attribute__((packed));
+
+/* pcdp_device.flags */
+#define PCDP_PRIMARY_CONSOLE 1
+
+struct pcdp_device {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 efi_index;
+ /* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
+ /* next data is device specific type (currently only pcdp_vga) */
+} __attribute__((packed));
+
+struct pcdp {
+ u8 signature[4];
+ u32 length;
+ u8 rev; /* PCDP v2.0 is rev 3 */
+ u8 chksum;
+ u8 oemid[6];
+ u8 oem_tabid[8];
+ u32 oem_rev;
+ u8 creator_id[4];
+ u32 creator_rev;
+ u32 num_uarts;
+ struct pcdp_uart uart[]; /* actual size is num_uarts */
+ /* remainder of table is pcdp_device structures */
+} __attribute__((packed));
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 0000000000..97944168b5
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_PSCI_FW
+ bool
+
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 0000000000..1956b88247
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
new file mode 100644
index 0000000000..d9629ff878
--- /dev/null
+++ b/drivers/firmware/psci/psci.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/hypervisor.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
+#else
+#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name
+#endif
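+
+/*
+ * For example, PSCI_FN_NATIVE(0_2, CPU_ON) expands to PSCI_0_2_FN64_CPU_ON
+ * on a 64-bit kernel and to PSCI_0_2_FN_CPU_ON on a 32-bit one.
+ */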
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+struct psci_operations psci_ops;
+static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
+
+bool psci_tos_resident_on(int cpu)
+{
+ return cpu == resident_cpu;
+}
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+static psci_fn *invoke_psci_fn;
+
+static struct psci_0_1_function_ids psci_0_1_function_ids;
+
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
+{
+ return psci_0_1_function_ids;
+}
+
+#define PSCI_0_2_POWER_STATE_MASK \
+ (PSCI_0_2_POWER_STATE_ID_MASK | \
+ PSCI_0_2_POWER_STATE_TYPE_MASK | \
+ PSCI_0_2_POWER_STATE_AFFL_MASK)
+
+#define PSCI_1_0_EXT_POWER_STATE_MASK \
+ (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \
+ PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
+
+static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
+
+static inline bool psci_has_ext_power_state(void)
+{
+ return psci_cpu_suspend_feature &
+ PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
+}
+
+bool psci_has_osi_support(void)
+{
+ return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
+static inline bool psci_power_state_loses_context(u32 state)
+{
+ const u32 mask = psci_has_ext_power_state() ?
+ PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
+ PSCI_0_2_POWER_STATE_TYPE_MASK;
+
+ return state & mask;
+}
+
+bool psci_power_state_is_valid(u32 state)
+{
+ const u32 valid_mask = psci_has_ext_power_state() ?
+ PSCI_1_0_EXT_POWER_STATE_MASK :
+ PSCI_0_2_POWER_STATE_MASK;
+
+ return !(state & ~valid_mask);
+}
+
+static __always_inline unsigned long
+__invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static __always_inline unsigned long
+__invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static __always_inline int psci_to_linux_errno(int errno)
+{
+ switch (errno) {
+ case PSCI_RET_SUCCESS:
+ return 0;
+ case PSCI_RET_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case PSCI_RET_INVALID_PARAMS:
+ case PSCI_RET_INVALID_ADDRESS:
+ return -EINVAL;
+ case PSCI_RET_DENIED:
+ return -EPERM;
+ }
+
+ return -EINVAL;
+}
+
+static u32 psci_0_1_get_version(void)
+{
+ return PSCI_VERSION(0, 1);
+}
+
+static u32 psci_0_2_get_version(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+int psci_set_osi_mode(bool enable)
+{
+ unsigned long suspend_mode;
+ int err;
+
+ suspend_mode = enable ? PSCI_1_0_SUSPEND_MODE_OSI :
+ PSCI_1_0_SUSPEND_MODE_PC;
+
+ err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
+ if (err < 0)
+ pr_info(FW_BUG "failed to set %s mode: %d\n",
+ enable ? "OSI" : "PC", err);
+ return psci_to_linux_errno(err);
+}
+
+static __always_inline int
+__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+{
+ int err;
+
+ err = invoke_psci_fn(fn, state, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static __always_inline int
+psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
+ state, entry_point);
+}
+
+static __always_inline int
+psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
+ state, entry_point);
+}
+
+static int __psci_cpu_off(u32 fn, u32 state)
+{
+ int err;
+
+ err = invoke_psci_fn(fn, state, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_0_1_cpu_off(u32 state)
+{
+ return __psci_cpu_off(psci_0_1_function_ids.cpu_off, state);
+}
+
+static int psci_0_2_cpu_off(u32 state)
+{
+ return __psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
+}
+
+static int __psci_cpu_on(u32 fn, unsigned long cpuid, unsigned long entry_point)
+{
+ int err;
+
+ err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_0_1_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(psci_0_1_function_ids.cpu_on, cpuid, entry_point);
+}
+
+static int psci_0_2_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid, entry_point);
+}
+
+static int __psci_migrate(u32 fn, unsigned long cpuid)
+{
+ int err;
+
+ err = invoke_psci_fn(fn, cpuid, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_0_1_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(psci_0_1_function_ids.migrate, cpuid);
+}
+
+static int psci_0_2_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(PSCI_FN_NATIVE(0_2, MIGRATE), cpuid);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO),
+ target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+ return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+ 0, 0, 0);
+}
+
+static void set_conduit(enum arm_smccc_conduit conduit)
+{
+ switch (conduit) {
+ case SMCCC_CONDUIT_HVC:
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ break;
+ case SMCCC_CONDUIT_SMC:
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ break;
+ default:
+ WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+ }
+
+ psci_conduit = conduit;
+}
+
+static int get_set_conduit_method(const struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
+ }
+
+ if (!strcmp("hvc", method)) {
+ set_conduit(SMCCC_CONDUIT_HVC);
+ } else if (!strcmp("smc", method)) {
+ set_conduit(SMCCC_CONDUIT_SMC);
+ } else {
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
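+
+/*
+ * The "method" property comes from the standard PSCI device-tree binding.
+ * A typical node (values illustrative) looks like:
+ *
+ *    psci {
+ *        compatible = "arm,psci-1.0";
+ *        method = "smc";
+ *    };
+ */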
+
+static int psci_sys_reset(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+ psci_system_reset2_supported) {
+ /*
+ * reset_type[31] = 0 (architectural)
+ * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+ * cookie = 0 (ignored by the implementation)
+ */
+ invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+ } else {
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block psci_sys_reset_nb = {
+ .notifier_call = psci_sys_reset,
+ .priority = 129,
+};
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+static int psci_features(u32 psci_func_id)
+{
+ return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
+ psci_func_id, 0, 0);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define PSCI_ID(ver, _name) \
+ { .fn = PSCI_##ver##_FN_##_name, .name = #_name, }
+#define PSCI_ID_NATIVE(ver, _name) \
+ { .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, }
+
+/* A table of all optional functions */
+static const struct {
+ u32 fn;
+ const char *name;
+} psci_fn_ids[] = {
+ PSCI_ID_NATIVE(0_2, MIGRATE),
+ PSCI_ID(0_2, MIGRATE_INFO_TYPE),
+ PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+ PSCI_ID(1_0, CPU_FREEZE),
+ PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND),
+ PSCI_ID_NATIVE(1_0, NODE_HW_STATE),
+ PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND),
+ PSCI_ID(1_0, SET_SUSPEND_MODE),
+ PSCI_ID_NATIVE(1_0, STAT_RESIDENCY),
+ PSCI_ID_NATIVE(1_0, STAT_COUNT),
+ PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
+ PSCI_ID(1_1, MEM_PROTECT),
+ PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+};
+
+static int psci_debugfs_read(struct seq_file *s, void *data)
+{
+ int feature, type, i;
+ u32 ver;
+
+ ver = psci_ops.get_version();
+ seq_printf(s, "PSCIv%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ /* PSCI_FEATURES is available only starting from 1.0 */
+ if (PSCI_VERSION_MAJOR(ver) < 1)
+ return 0;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ seq_printf(s, "SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+ } else {
+ seq_puts(s, "SMC Calling Convention v1.0 is assumed\n");
+ }
+
+ feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
+ if (feature < 0) {
+ seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature);
+ } else {
+ seq_printf(s, "OSI is %ssupported\n",
+ (feature & BIT(0)) ? "" : "not ");
+ seq_printf(s, "%s StateID format is used\n",
+ (feature & BIT(1)) ? "Extended" : "Original");
+ }
+
+ type = psci_ops.migrate_info_type();
+ if (type == PSCI_0_2_TOS_UP_MIGRATE ||
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ unsigned long cpuid;
+
+ seq_printf(s, "Trusted OS %smigrate capable\n",
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE ? "not " : "");
+ cpuid = psci_migrate_info_up_cpu();
+ seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n",
+ cpuid, resident_cpu);
+ } else if (type == PSCI_0_2_TOS_MP) {
+ seq_puts(s, "Trusted OS migration not required\n");
+ } else {
+ if (type != PSCI_RET_NOT_SUPPORTED)
+ seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) {
+ feature = psci_features(psci_fn_ids[i].fn);
+ if (feature == PSCI_RET_NOT_SUPPORTED)
+ continue;
+ if (feature < 0)
+ seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n",
+ psci_fn_ids[i].name, feature);
+ else
+ seq_printf(s, "%s is supported\n", psci_fn_ids[i].name);
+ }
+
+ return 0;
+}
+
+static int psci_debugfs_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, psci_debugfs_read, NULL);
+}
+
+static const struct file_operations psci_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psci_debugfs_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static int __init psci_debugfs_init(void)
+{
+ if (!invoke_psci_fn || !psci_ops.get_version)
+ return 0;
+
+ return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
+ &psci_debugfs_ops));
+}
+late_initcall(psci_debugfs_init)
+#endif
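+
+/*
+ * Reading the resulting file gives output along these lines (the exact
+ * contents depend on the firmware; this only illustrates the format
+ * produced by psci_debugfs_read() above):
+ *
+ *    # cat /sys/kernel/debug/psci
+ *    PSCIv1.1
+ *    SMC Calling Convention v1.2
+ *    OSI is not supported
+ *    Original StateID format is used
+ *    Trusted OS migration not required
+ *    SYSTEM_SUSPEND is supported
+ */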
+
+#ifdef CONFIG_CPU_IDLE
+static noinstr int psci_suspend_finisher(unsigned long state)
+{
+ u32 power_state = state;
+ phys_addr_t pa_cpu_resume;
+
+ pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
+
+ return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
+}
+
+int psci_cpu_suspend_enter(u32 state)
+{
+ int ret;
+
+ if (!psci_power_state_loses_context(state)) {
+ struct arm_cpuidle_irq_context context;
+
+ ct_cpuidle_enter();
+ arm_cpuidle_save_irq_context(&context);
+ ret = psci_ops.cpu_suspend(state, 0);
+ arm_cpuidle_restore_irq_context(&context);
+ ct_cpuidle_exit();
+ } else {
+ /*
+ * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_enter();
+
+ ret = cpu_suspend(state, psci_suspend_finisher);
+
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_exit();
+ }
+
+ return ret;
+}
+#endif
+
+static int psci_system_suspend(unsigned long unused)
+{
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+
+ return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ pa_cpu_resume, 0, 0);
+}
+
+static int psci_system_suspend_enter(suspend_state_t state)
+{
+ return cpu_suspend(0, psci_system_suspend);
+}
+
+static const struct platform_suspend_ops psci_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = psci_system_suspend_enter,
+};
+
+static void __init psci_init_system_reset2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ psci_system_reset2_supported = true;
+}
+
+static void __init psci_init_system_suspend(void)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_SUSPEND))
+ return;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ suspend_set_ops(&psci_suspend_ops);
+}
+
+static void __init psci_init_cpu_suspend(void)
+{
+ int feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
+
+ if (feature != PSCI_RET_NOT_SUPPORTED)
+ psci_cpu_suspend_feature = feature;
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+ unsigned long cpuid;
+ int type, cpu = -1;
+
+ type = psci_ops.migrate_info_type();
+
+ if (type == PSCI_0_2_TOS_MP) {
+ pr_info("Trusted OS migration not required\n");
+ return;
+ }
+
+ if (type == PSCI_RET_NOT_SUPPORTED) {
+ pr_info("MIGRATE_INFO_TYPE not supported.\n");
+ return;
+ }
+
+ if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ return;
+ }
+
+ cpuid = psci_migrate_info_up_cpu();
+ if (cpuid & ~MPIDR_HWID_BITMASK) {
+ pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+ cpuid);
+ return;
+ }
+
+ cpu = get_logical_index(cpuid);
+ resident_cpu = cpu >= 0 ? cpu : -1;
+
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_init_smccc(void)
+{
+ u32 ver = ARM_SMCCC_VERSION_1_0;
+ int feature;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ u32 ret;
+ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ if (ret >= ARM_SMCCC_VERSION_1_1) {
+ arm_smccc_version_init(ret, psci_conduit);
+ ver = ret;
+ }
+ }
+
+ /*
+ * Conveniently, the SMCCC and PSCI versions are encoded the
+ * same way. No, this isn't accidental.
+ */
+ pr_info("SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+
+ psci_ops = (struct psci_operations){
+ .get_version = psci_0_2_get_version,
+ .cpu_suspend = psci_0_2_cpu_suspend,
+ .cpu_off = psci_0_2_cpu_off,
+ .cpu_on = psci_0_2_cpu_on,
+ .migrate = psci_0_2_migrate,
+ .affinity_info = psci_affinity_info,
+ .migrate_info_type = psci_migrate_info_type,
+ };
+
+ register_restart_handler(&psci_sys_reset_nb);
+
+ pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+ u32 ver = psci_0_2_get_version();
+
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
+ }
+
+ psci_0_2_set_functions();
+
+ psci_init_migrate();
+
+ if (PSCI_VERSION_MAJOR(ver) >= 1) {
+ psci_init_smccc();
+ psci_init_cpu_suspend();
+ psci_init_system_suspend();
+ psci_init_system_reset2();
+ kvm_init_hyp_services();
+ }
+
+ return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >= 0.2
+ *
+ * Probe based on the PSCI PSCI_VERSION function
+ */
+static int __init psci_0_2_init(const struct device_node *np)
+{
+ int err;
+
+ err = get_set_conduit_method(np);
+ if (err)
+ return err;
+
+ /*
+ * Starting with v0.2, the PSCI specification introduced a call
+ * (PSCI_VERSION) that allows probing the firmware version, so
+ * that PSCI function IDs and version specific initialization
+ * can be carried out according to the specific version reported
+ * by firmware
+ */
+ return psci_probe();
+}
+
+/*
+ * For PSCI < v0.2, the PSCI function IDs are obtained from DT.
+ */
+static int __init psci_0_1_init(const struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+ if (err)
+ return err;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+ psci_ops.get_version = psci_0_1_get_version;
+
+ if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+ psci_0_1_function_ids.cpu_suspend = id;
+ psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
+ }
+
+ if (!of_property_read_u32(np, "cpu_off", &id)) {
+ psci_0_1_function_ids.cpu_off = id;
+ psci_ops.cpu_off = psci_0_1_cpu_off;
+ }
+
+ if (!of_property_read_u32(np, "cpu_on", &id)) {
+ psci_0_1_function_ids.cpu_on = id;
+ psci_ops.cpu_on = psci_0_1_cpu_on;
+ }
+
+ if (!of_property_read_u32(np, "migrate", &id)) {
+ psci_0_1_function_ids.migrate = id;
+ psci_ops.migrate = psci_0_1_migrate;
+ }
+
+ return 0;
+}
+
+static int __init psci_1_0_init(const struct device_node *np)
+{
+ int err;
+
+ err = psci_0_2_init(np);
+ if (err)
+ return err;
+
+ if (psci_has_osi_support()) {
+ pr_info("OSI mode supported.\n");
+
+ /* Default to PC mode. */
+ psci_set_osi_mode(false);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
+ {},
+};
+
+int __init psci_dt_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ ret = init_fn(np);
+
+ of_node_put(np);
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * PSCI 0.2+ is used when ACPI is deployed on ARM64, as explicitly
+ * clarified in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ set_conduit(SMCCC_CONDUIT_HVC);
+ else
+ set_conduit(SMCCC_CONDUIT_SMC);
+
+ return psci_probe();
+}
+#endif
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
new file mode 100644
index 0000000000..116eb465cd
--- /dev/null
+++ b/drivers/firmware/psci/psci_checker.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+ COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+ COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+ int migrate_type = -1;
+ int cpu;
+
+ if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+ pr_warn("Missing PSCI operations, aborting tests\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (psci_ops.migrate_info_type)
+ migrate_type = psci_ops.migrate_info_type();
+
+ if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+ migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ /* There is a UP Trusted OS, find on which core it resides. */
+ for_each_online_cpu(cpu)
+ if (psci_tos_resident_on(cpu)) {
+ tos_resident_cpu = cpu;
+ break;
+ }
+ if (tos_resident_cpu == -1)
+ pr_warn("UP Trusted OS resides on no online CPU\n");
+ }
+
+ return 0;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+ struct cpumask *offlined_cpus)
+{
+ int cpu;
+ int err = 0;
+
+ cpumask_clear(offlined_cpus);
+
+ /* Try to power down all CPUs in the mask. */
+ for_each_cpu(cpu, cpus) {
+ int ret = remove_cpu(cpu);
+
+ /*
+ * cpu_down() checks the number of online CPUs before the TOS
+ * resident CPU.
+ */
+ if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+ if (ret != -EBUSY) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down last online CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (cpu == tos_resident_cpu) {
+ if (ret != -EPERM) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down TOS resident CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power down CPU %d\n", ret, cpu);
+ ++err;
+ }
+
+ if (ret == 0)
+ cpumask_set_cpu(cpu, offlined_cpus);
+ }
+
+ /* Try to power up all the CPUs that have been offlined. */
+ for_each_cpu(cpu, offlined_cpus) {
+ int ret = add_cpu(cpu);
+
+ if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power up CPU %d\n", ret, cpu);
+ ++err;
+ } else {
+ cpumask_clear_cpu(cpu, offlined_cpus);
+ }
+ }
+
+ /*
+ * Something went bad at some point and some CPUs could not be turned
+ * back on.
+ */
+ WARN_ON(!cpumask_empty(offlined_cpus) ||
+ num_online_cpus() != nb_available_cpus);
+
+ return err;
+}
+
+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+ int i;
+ cpumask_var_t *cpu_groups = *pcpu_groups;
+
+ for (i = 0; i < num; ++i)
+ free_cpumask_var(cpu_groups[i]);
+ kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+ int num_groups = 0;
+ cpumask_var_t tmp, *cpu_groups;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(tmp, cpu_online_mask);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cpu_group =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpumask_var(tmp);
+ free_cpu_groups(num_groups, &cpu_groups);
+ return -ENOMEM;
+ }
+ cpumask_copy(cpu_groups[num_groups++], cpu_group);
+ cpumask_andnot(tmp, tmp, cpu_group);
+ }
+
+ free_cpumask_var(tmp);
+ *pcpu_groups = cpu_groups;
+
+ return num_groups;
+}
+
+static int hotplug_tests(void)
+{
+ int i, nb_cpu_group, err = -ENOMEM;
+ cpumask_var_t offlined_cpus, *cpu_groups;
+ char *page_buf;
+
+ if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+ return err;
+
+ nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+ if (nb_cpu_group < 0)
+ goto out_free_cpus;
+ page_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!page_buf)
+ goto out_free_cpu_groups;
+
+ /*
+ * Of course the last CPU cannot be powered down, and cpu_down() should
+ * refuse to do that.
+ */
+ pr_info("Trying to turn off and on again all CPUs\n");
+ err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+ /*
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
+ */
+ for (i = 0; i < nb_cpu_group; ++i) {
+ ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+ cpu_groups[i]);
+ /* Remove trailing newline. */
+ page_buf[len - 1] = '\0';
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
+ }
+
+ free_page((unsigned long)page_buf);
+out_free_cpu_groups:
+ free_cpu_groups(nb_cpu_group, &cpu_groups);
+out_free_cpus:
+ free_cpumask_var(offlined_cpus);
+ return err;
+}
+
+static void dummy_callback(struct timer_list *unused) {}
+
+static int suspend_cpu(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+ int ret;
+
+ arch_cpu_idle_enter();
+
+ if (broadcast) {
+ /*
+ * The local timer will be shut down, we need to enter tick
+ * broadcast.
+ */
+ ret = tick_broadcast_enter();
+ if (ret) {
+ /*
+ * In the absence of hardware broadcast mechanism,
+ * this CPU might be used to broadcast wakeups, which
+ * may be why entering tick broadcast has failed.
+ * There is little the kernel can do to work around
+ * that, so enter WFI instead (idle state 0).
+ */
+ cpu_do_idle();
+ ret = 0;
+ goto out_arch_exit;
+ }
+ }
+
+ ret = state->enter(dev, drv, index);
+
+ if (broadcast)
+ tick_broadcast_exit();
+
+out_arch_exit:
+ arch_cpu_idle_exit();
+
+ return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+ int cpu = (long)arg;
+ int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv;
+ /* No need for an actual callback, we just want to wake up the CPU. */
+ struct timer_list wakeup_timer;
+
+ /* Wait for the main thread to give the start signal. */
+ wait_for_completion(&suspend_threads_started);
+
+ /* Set maximum priority to preempt all other threads on this CPU. */
+ sched_set_fifo(current);
+
+ dev = this_cpu_read(cpuidle_devices);
+ drv = cpuidle_get_cpu_driver(dev);
+
+ pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+ cpu, drv->state_count - 1);
+
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
+ for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+ int index;
+ /*
+ * Test all possible states, except 0 (which is usually WFI and
+ * doesn't use PSCI).
+ */
+ for (index = 1; index < drv->state_count; ++index) {
+ int ret;
+ struct cpuidle_state *state = &drv->states[index];
+
+ /*
+ * Set the timer to wake this CPU up in some time (which
+ * should be largely sufficient for entering suspend).
+ * If the local tick is disabled when entering suspend,
+ * suspend_cpu() takes care of switching to a broadcast
+ * tick, so the timer will still wake us up.
+ */
+ mod_timer(&wakeup_timer, jiffies +
+ usecs_to_jiffies(state->target_residency));
+
+ /* IRQs must be disabled during suspend operations. */
+ local_irq_disable();
+
+ ret = suspend_cpu(dev, drv, index);
+
+ /*
+ * We have woken up. Re-enable IRQs to handle any
+ * pending interrupt, do not wait until the end of the
+ * loop.
+ */
+ local_irq_enable();
+
+ if (ret == index) {
+ ++nb_suspend;
+ } else if (ret >= 0) {
+ /* We did not enter the expected state. */
+ ++nb_shallow_sleep;
+ } else {
+ pr_err("Failed to suspend CPU %d: error %d "
+ "(requested state %d, cycle %d)\n",
+ cpu, ret, index, i);
+ ++nb_err;
+ }
+ }
+ }
+
+ /*
+ * Disable the timer to make sure that the timer will not trigger
+ * later.
+ */
+ del_timer(&wakeup_timer);
+ destroy_timer_on_stack(&wakeup_timer);
+
+ if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+ complete(&suspend_threads_done);
+
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_park())
+ break;
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+ kthread_parkme();
+
+ return nb_err;
+}
+
+static int suspend_tests(void)
+{
+ int i, cpu, err = 0;
+ struct task_struct **threads;
+ int nb_threads = 0;
+
+ threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ /*
+ * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+ * mode, as it might interfere with the suspend threads on other CPUs.
+ * This does not prevent the suspend threads from using cpuidle (only
+ * the idle tasks check this status). Take the idle lock so that
+ * the cpuidle driver and device look-up can be carried out safely.
+ */
+ cpuidle_pause_and_lock();
+
+ for_each_online_cpu(cpu) {
+ struct task_struct *thread;
+ /* Check that cpuidle is available on that CPU. */
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ if (!dev || !drv) {
+ pr_warn("cpuidle not available on CPU %d, ignoring\n",
+ cpu);
+ continue;
+ }
+
+ thread = kthread_create_on_cpu(suspend_test_thread,
+ (void *)(long)cpu, cpu,
+ "psci_suspend_test");
+ if (IS_ERR(thread))
+ pr_err("Failed to create kthread on CPU %d\n", cpu);
+ else
+ threads[nb_threads++] = thread;
+ }
+
+ if (nb_threads < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ atomic_set(&nb_active_threads, nb_threads);
+
+ /*
+ * Wake up the suspend threads. To avoid the main thread being preempted
+ * before all the threads have been unparked, the suspend threads will
+ * wait for the completion of suspend_threads_started.
+ */
+ for (i = 0; i < nb_threads; ++i)
+ wake_up_process(threads[i]);
+ complete_all(&suspend_threads_started);
+
+ wait_for_completion(&suspend_threads_done);
+
+
+ /* Stop and destroy all threads, get return status. */
+ for (i = 0; i < nb_threads; ++i) {
+ err += kthread_park(threads[i]);
+ err += kthread_stop(threads[i]);
+ }
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
+ return err;
+}
+
+static int __init psci_checker(void)
+{
+ int ret;
+
+ /*
+ * Since we're in an initcall, we assume that all the CPUs that can
+ * be onlined have been onlined.
+ *
+ * The tests assume that hotplug is enabled but nobody else is using it,
+ * otherwise the results will be unpredictable. However, since there
+ * is no userspace yet in initcalls, that should be fine, as long as
+ * no torture test is running at the same time (see Kconfig).
+ */
+ nb_available_cpus = num_online_cpus();
+
+ /* Check PSCI operations are set up and working. */
+ ret = psci_ops_check();
+ if (ret)
+ return ret;
+
+ pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+ pr_info("Starting hotplug tests\n");
+ ret = hotplug_tests();
+ if (ret == 0)
+ pr_info("Hotplug tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in hotplug tests\n", ret);
+ else {
+ pr_err("Out of memory\n");
+ return ret;
+ }
+
+ pr_info("Starting suspend tests (%d cycles per state)\n",
+ NUM_SUSPEND_CYCLE);
+ ret = suspend_tests();
+ if (ret == 0)
+ pr_info("Suspend tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in suspend tests\n", ret);
+ else {
+ switch (ret) {
+ case -ENOMEM:
+ pr_err("Out of memory\n");
+ break;
+ case -ENODEV:
+ pr_warn("Could not start suspend tests on any CPU\n");
+ break;
+ }
+ }
+
+ pr_info("PSCI checker completed\n");
+ return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
new file mode 100644
index 0000000000..029e6d117c
--- /dev/null
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+
+/**
+ * struct scm_legacy_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_legacy_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct scm_legacy_command
+ * | command header |
+ * ------------------- <--- scm_legacy_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct scm_legacy_response and
+ * | response header | scm_legacy_command_to_response()
+ * ------------------- <--- scm_legacy_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_legacy_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_legacy_command {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 resp_hdr_offset;
+ __le32 id;
+ __le32 buf[];
+};
+
+/**
+ * struct scm_legacy_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_legacy_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_legacy_response {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 is_complete;
+};
+
+/**
+ * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_legacy_response *scm_legacy_command_to_response(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_legacy_get_command_buffer(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_legacy_get_response_buffer(
+ const struct scm_legacy_response *rsp)
+{
+ return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
+
+static void __scm_legacy_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ do {
+ arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ smc->args[6], smc->args[7], res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+/**
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
+ * finish processing.
+ * @dev: device
+ * @desc: descriptor structure containing arguments and return values
+ * @res: results from SMC call
+ *
+ * A note on cache maintenance:
+ * Any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Cache maintenance on the command
+ * and response buffers is taken care of by qcom_scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ u8 arglen = desc->arginfo & 0xf;
+ int ret = 0, context_id;
+ unsigned int i;
+ struct scm_legacy_command *cmd;
+ struct scm_legacy_response *rsp;
+ struct arm_smccc_args smc = {0};
+ struct arm_smccc_res smc_res;
+ const size_t cmd_len = arglen * sizeof(__le32);
+ const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
+ __le32 *arg_buf;
+ const __le32 *res_buf;
+
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+ cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
+
+ arg_buf = scm_legacy_get_command_buffer(cmd);
+ for (i = 0; i < arglen; i++)
+ arg_buf[i] = cpu_to_le32(desc->args[i]);
+
+ rsp = scm_legacy_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ smc.args[0] = 1;
+ smc.args[1] = (unsigned long)&context_id;
+ smc.args[2] = cmd_phys;
+
+ mutex_lock(&qcom_scm_lock);
+ __scm_legacy_do(&smc, &smc_res);
+ if (smc_res.a0)
+ ret = qcom_scm_remap_error(smc_res.a0);
+ mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ goto out;
+
+ do {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
+ } while (!rsp->is_complete);
+
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+
+ if (res) {
+ res_buf = scm_legacy_get_response_buffer(rsp);
+ for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
+ res->result[i] = le32_to_cpu(res_buf[i]);
+ }
+out:
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
+ return ret;
+}
+
+#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
+#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
+#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
+#define SCM_LEGACY_MASK_IRQS BIT(5)
+#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
+ ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
+ SCM_LEGACY_CLASS_REGISTER | \
+ SCM_LEGACY_MASK_IRQS | \
+ (n & 0xf))
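+
+/*
+ * Illustrative encoding (not part of this change): for an atomic call into
+ * the IO service, SCM_LEGACY_ATOMIC_ID(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, 1)
+ * expands to ((0x05 << 10 | 0x01) << 12) | (0x2 << 8) | BIT(5) | 1,
+ * i.e. 0x1401221: the legacy function ID in the upper bits, the "register"
+ * call class, masked IRQs, and a one-argument count in the low nibble.
+ */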
+
+/**
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
+ * and 3 return values
+ * @unused: device, legacy argument, not used, can be NULL
+ * @desc: SCM call descriptor containing arguments
+ * @res: SCM call return values
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+int scm_legacy_call_atomic(struct device *unused,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ int context_id;
+ struct arm_smccc_res smc_res;
+ size_t arglen = desc->arginfo & 0xf;
+
+ BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
+
+ arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
+ (unsigned long)&context_id,
+ desc->args[0], desc->args[1], desc->args[2],
+ desc->args[3], desc->args[4], 0, &smc_res);
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return smc_res.a0;
+}
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
new file mode 100644
index 0000000000..16cf88acfa
--- /dev/null
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define SCM_SMC_N_REG_ARGS 4
+#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
+#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
+#define SCM_SMC_FIRST_REG_IDX 2
+#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
+
+static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ unsigned long a0 = smc->args[0];
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ quirk.state.a6, smc->args[7], res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ a0 = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
+{
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_res get_wq_res;
+ struct arm_smccc_args get_wq_ctx = {0};
+
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
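+/*
+ * When firmware parks a standard call with QCOM_SCM_WAITQ_SLEEP, wait for the
+ * wait-queue interrupt to complete the per-context completion and then resume
+ * the parked call with a WAITQ_RESUME request carrying the context returned
+ * by firmware. This repeats until the call returns something other than
+ * QCOM_SCM_WAITQ_SLEEP.
+ */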
+static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
+ struct arm_smccc_res *res)
+{
+ int ret;
+ u32 wq_ctx, smc_call_ctx;
+ struct arm_smccc_args resume;
+ struct arm_smccc_args *smc = waitq;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+
+ ret = qcom_scm_wait_for_wq_completion(wq_ctx);
+ if (ret)
+ return ret;
+
+ fill_wq_resume_args(&resume, smc_call_ctx);
+ smc = &resume;
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
+
+ if (atomic) {
+ __scm_smc_do_quirk(smc, res);
+ return 0;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (ret)
+ return ret;
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
+}
+
+
+int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic)
+{
+ int arglen = desc->arginfo & 0xf;
+ int i, ret;
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+ u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ struct arm_smccc_res smc_res;
+ struct arm_smccc_args smc = {0};
+
+ smc.args[0] = ARM_SMCCC_CALL_VAL(
+ smccc_call_type,
+ qcom_smccc_convention,
+ desc->owner,
+ SCM_SMC_FNID(desc->svc, desc->cmd));
+ smc.args[1] = desc->arginfo;
+ for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
+ smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+
+ if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ }
+
+ /* ret error check follows after args_virt cleanup */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (ret)
+ return ret;
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
new file mode 100644
index 0000000000..69831f1d91
--- /dev/null
+++ b/drivers/firmware/qcom_scm.c
@@ -0,0 +1,1518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+#include <linux/interconnect.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
+
+#include "qcom_scm.h"
+
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct icc_path *path;
+ struct completion waitq_comp;
+ struct reset_controller_dev reset;
+
+ /* control access to the interconnect path */
+ struct mutex scm_bw_lock;
+ int scm_vote_count;
+
+ u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
+};
+
+/* Each bit configures cold/warm boot address for one of the 4 CPUs */
+static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
+ 0, BIT(0), BIT(3), BIT(5)
+};
+static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
+ BIT(2), BIT(1), BIT(4), BIT(6)
+};
+
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
+static const char * const qcom_scm_convention_names[] = {
+ [SMC_CONVENTION_UNKNOWN] = "unknown",
+ [SMC_CONVENTION_ARM_32] = "smc arm 32",
+ [SMC_CONVENTION_ARM_64] = "smc arm 64",
+ [SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ return 0;
+
+disable_iface:
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ clk_disable_unprepare(__scm->core_clk);
+ clk_disable_unprepare(__scm->iface_clk);
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
+static int qcom_scm_bw_enable(void)
+{
+ int ret = 0;
+
+ if (!__scm->path)
+ return 0;
+
+ if (IS_ERR(__scm->path))
+ return -EINVAL;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (!__scm->scm_vote_count) {
+ ret = icc_set_bw(__scm->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(__scm->dev, "failed to set bandwidth request\n");
+ goto err_bw;
+ }
+ }
+ __scm->scm_vote_count++;
+err_bw:
+ mutex_unlock(&__scm->scm_bw_lock);
+
+ return ret;
+}
+
+static void qcom_scm_bw_disable(void)
+{
+ if (IS_ERR_OR_NULL(__scm->path))
+ return;
+
+ mutex_lock(&__scm->scm_bw_lock);
+ if (__scm->scm_vote_count-- == 1)
+ icc_set_bw(__scm->path, 0, 0);
+ mutex_unlock(&__scm->scm_bw_lock);
+}
+
+enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
+static DEFINE_SPINLOCK(scm_query_lock);
+
+static enum qcom_scm_convention __get_convention(void)
+{
+ unsigned long flags;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+ QCOM_SCM_INFO_IS_CALL_AVAIL) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ enum qcom_scm_convention probed_convention;
+ int ret;
+ bool forced = false;
+
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
+
+ /*
+ * Per the "SMC calling convention specification", the 64-bit calling
+ * convention can only be used when the client is 64-bit, otherwise
+ * the system will encounter undefined behaviour.
+ */
+#if IS_ENABLED(CONFIG_ARM64)
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+ */
+ probed_convention = SMC_CONVENTION_ARM_64;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto found;
+
+ /*
+ * Some SC7180 firmwares didn't implement the
+ * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
+ * calling conventions on these firmwares. Luckily we don't make any
+ * early calls into the firmware on these SoCs so the device pointer
+ * will be valid here to check if the compatible matches.
+ */
+ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
+ forced = true;
+ goto found;
+ }
+#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto found;
+
+ probed_convention = SMC_CONVENTION_LEGACY;
+found:
+ spin_lock_irqsave(&scm_query_lock, flags);
+ if (probed_convention != qcom_scm_convention) {
+ qcom_scm_convention = probed_convention;
+ pr_info("qcom_scm: convention: %s%s\n",
+ qcom_scm_convention_names[qcom_scm_convention],
+ forced ? " (forced)" : "");
+ }
+ spin_unlock_irqrestore(&scm_query_lock, flags);
+
+ return qcom_scm_convention;
+}
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ might_sleep();
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, false);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, true);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call_atomic(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+ break;
+ case SMC_CONVENTION_LEGACY:
+ desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+ break;
+ default:
+ pr_err("Unknown SMC convention being used\n");
+ return false;
+ }
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? false : !!res.result[0];
+}
+
+static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
+{
+ int cpu;
+ unsigned int flags = 0;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ for_each_present_cpu(cpu) {
+ if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
+ return -EINVAL;
+ flags |= cpu_bits[cpu];
+ }
+
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
+
+ return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+
+static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ .arginfo = QCOM_SCM_ARGS(6),
+ .args = {
+ virt_to_phys(entry),
+ /* Apply to all CPUs in all affinity levels */
+ ~0ULL, ~0ULL, ~0ULL, ~0ULL,
+ flags,
+ },
+ };
+
+ /* Need a device for DMA of the additional arguments */
+ if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
+ return -EOPNOTSUPP;
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
+ * @entry: Entry point function for the cpus
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry)
+{
+ if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
+ /* Fallback to old SCM call */
+ return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
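+
+/*
+ * Illustrative use (a sketch, not taken from an in-tree caller): platform
+ * cpuidle or hotplug code would typically register its resume entry point
+ * once at boot, e.g.
+ *
+ *	if (qcom_scm_set_warm_boot_addr(cpu_resume))
+ *		pr_warn("qcom_scm: failed to set warm boot address\n");
+ *
+ * cpu_resume here stands in for whatever low-level entry point the platform
+ * uses when a core returns from power collapse.
+ */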
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
+ * @entry: Entry point function for the cpus
+ */
+int qcom_scm_set_cold_boot_addr(void *entry)
+{
+ if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
+ /* Fallback to old SCM call */
+ return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down a cpu. If there was a pending interrupt,
+ * control returns from this function; otherwise, the cpu jumps to the
+ * warm boot entry point set for this cpu upon reset.
+ */
+void qcom_scm_cpu_power_down(u32 flags)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+ .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
+
+int qcom_scm_set_remote_state(u32 state, u32 id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = state,
+ .args[1] = id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
+
+static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_writel(__scm->dload_mode_addr,
+ enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ * @ctx: optional metadata context
+ *
+ * Return: 0 on success.
+ *
+ * Upon successful return, the PAS metadata context (@ctx) will be used to
+ * track the metadata allocation, this needs to be released by invoking
+ * qcom_scm_pas_metadata_release() by the caller.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ /*
+ * During the SCM call, memory protection will be enabled for the
+ * metadata blob, so make sure it's physically contiguous, 4K aligned
+ * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+ GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto out;
+
+ ret = qcom_scm_bw_enable();
+ if (ret) {
+ qcom_scm_clk_disable();
+ goto out;
+ }
+
+ desc.args[1] = mdata_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ qcom_scm_bw_disable();
+ qcom_scm_clk_disable();
+
+out:
+ if (ret < 0 || !ctx) {
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+ } else {
+ ctx->ptr = mdata_buf;
+ ctx->phys = mdata_phys;
+ ctx->size = size;
+ }
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_metadata_release() - release metadata context
+ * @ctx: metadata context
+ */
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
+{
+ if (!ctx->ptr)
+ return;
+
+ dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
+
+ ctx->ptr = NULL;
+ ctx->phys = 0;
+ ctx->size = 0;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+ .arginfo = QCOM_SCM_ARGS(3),
+ .args[0] = peripheral,
+ .args[1] = addr,
+ .args[2] = size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_bw_enable();
+ if (ret) {
+ qcom_scm_clk_disable();
+ return ret;
+ }
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_bw_enable();
+ if (ret) {
+ qcom_scm_clk_disable();
+ return ret;
+ }
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_bw_disable();
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_bw_enable();
+ if (ret) {
+ qcom_scm_clk_disable();
+ return ret;
+ }
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ qcom_scm_bw_disable();
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
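+
+/*
+ * Typical PAS flow (an illustrative sketch; the exact sequence is up to the
+ * remoteproc driver using these calls):
+ *
+ *	ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size, &ctx);
+ *	if (!ret)
+ *		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
+ *	// ...copy firmware segments into the carveout...
+ *	if (!ret)
+ *		ret = qcom_scm_pas_auth_and_reset(pas_id);
+ *	qcom_scm_pas_metadata_release(&ctx);
+ *
+ * with qcom_scm_pas_shutdown(pas_id) on the teardown path. pas_id, fw,
+ * mem_phys and mem_size are placeholders for the caller's peripheral id,
+ * firmware image and carveout region.
+ */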
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PIL_PAS_IS_SUPPORTED))
+ return false;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = reset,
+ .args[1] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+ .assert = qcom_scm_pas_reset_assert,
+ .deassert = qcom_scm_pas_reset_deassert,
+};
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_READ,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = addr,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+
+ ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
+ if (ret >= 0)
+ *val = res.result[0];
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_WRITE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = addr,
+ .args[1] = val,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
+
+/**
+ * qcom_scm_restore_sec_cfg_available() - Check if secure environment
+ * supports restore security config interface.
+ *
+ * Return true if restore-cfg interface is supported, false if not.
+ */
+bool qcom_scm_restore_sec_cfg_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_RESTORE_SEC_CFG);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = device_id,
+ .args[1] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (size)
+ *size = res.result[0];
+
+ return ret ? : res.result[1];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+ .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_VAL),
+ .args[0] = addr,
+ .args[1] = size,
+ .args[2] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ /* the pg table has been initialized already, ignore the error */
+ if (ret == -EPERM)
+ ret = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = size,
+ .args[1] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
+
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start,
+ u32 cp_nonpixel_size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_VIDEO_VAR,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = cp_start,
+ .args[1] = cp_size,
+ .args[2] = cp_nonpixel_start,
+ .args[3] = cp_nonpixel_size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
+
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_ASSIGN,
+ .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = mem_region,
+ .args[1] = mem_sz,
+ .args[2] = src,
+ .args[3] = src_sz,
+ .args[4] = dest,
+ .args[5] = dest_sz,
+ .args[6] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid bitmap of the current set of owners, each set bit
+ * indicates a unique owner
+ * @newvm: array having new owners and corresponding permission
+ * flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure or 0 on success with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ u64 *srcvm,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ dma_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret, i, b;
+ u64 srcvm_bits = *srcvm;
+
+ src_sz = hweight64(srcvm_bits) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ i = 0;
+ for (b = 0; b < BITS_PER_TYPE(u64); b++) {
+ if (srcvm_bits & BIT(b))
+ src[i++] = cpu_to_le32(b);
+ }
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map->mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill in the destination vmid details */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+ destvm->vmid = cpu_to_le32(newvm->vmid);
+ destvm->perm = cpu_to_le32(newvm->perm);
+ destvm->ctx = 0;
+ destvm->ctx_size = 0;
+ next_vm |= BIT(newvm->vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
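+
+/*
+ * Illustrative use (a sketch, not from an in-tree caller): handing a carveout
+ * from the HLOS VM to a remote VM. The QCOM_SCM_VMID_* and QCOM_SCM_PERM_*
+ * macros come from the public qcom_scm header; the region is a placeholder.
+ *
+ *	u64 src = BIT(QCOM_SCM_VMID_HLOS);
+ *	struct qcom_scm_vmperm newvm[] = {
+ *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
+ *	};
+ *
+ *	ret = qcom_scm_assign_mem(region_phys, region_size, &src,
+ *				  newvm, ARRAY_SIZE(newvm));
+ *
+ * On success src is updated to the new owner bitmap, so the same variable can
+ * be passed back when reclaiming the region.
+ */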
+
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+ QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .args[3] = mode,
+ .arginfo = QCOM_SCM_ARGS(4),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .arginfo = QCOM_SCM_ARGS(3),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
+
+/**
+ * qcom_scm_ice_available() - Is the ICE key programming interface available?
+ *
+ * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
+ * qcom_scm_ice_set_key() are available.
+ */
+bool qcom_scm_ice_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
+
+/**
+ * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
+ * @index: the keyslot to invalidate
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_invalidate_key(u32 index)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = index,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
+
+/**
+ * qcom_scm_ice_set_key() - Set an inline encryption key
+ * @index: the keyslot into which to set the key
+ * @key: the key to program
+ * @key_size: the size of the key in bytes
+ * @cipher: the encryption algorithm the key is for
+ * @data_unit_size: the encryption data unit size, i.e. the size of each
+ * individual plaintext and ciphertext. Given in 512-byte
+ * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
+ *
+ * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
+ * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
+ QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL),
+ .args[0] = index,
+ .args[2] = key_size,
+ .args[3] = cipher,
+ .args[4] = data_unit_size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ void *keybuf;
+ dma_addr_t key_phys;
+ int ret;
+
+ /*
+ * 'key' may point to vmalloc()'ed memory, but we need to pass a
+ * physical address that's been properly flushed. The sanctioned way to
+ * do this is by using the DMA API. But as is best practice for crypto
+ * keys, we also must wipe the key after use. This makes kmemdup() +
+ * dma_map_single() not clearly correct, since the DMA API can use
+ * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
+ * keys is normally rare and thus not performance-critical.
+ */
+
+ keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
+ GFP_KERNEL);
+ if (!keybuf)
+ return -ENOMEM;
+ memcpy(keybuf, key, key_size);
+ desc.args[1] = key_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ memzero_explicit(keybuf, key_size);
+
+ dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
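+
+/*
+ * Illustrative use (a sketch; real callers sit in the storage/ICE drivers):
+ * programming a 256-bit AES-XTS key for 4096-byte data units into slot 0.
+ *
+ *	err = qcom_scm_ice_set_key(0, key_bytes, 64,
+ *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
+ *
+ * key_bytes is a placeholder for the raw key material; 64 bytes because
+ * AES-256-XTS uses two 256-bit keys, and a data_unit_size of 8 selects
+ * 8 * 512 = 4096-byte data units.
+ */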
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+ bool avail;
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return false;
+
+ avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_HDCP_INVOKE);
+
+ qcom_scm_clk_disable();
+
+ return avail;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_HDCP,
+ .cmd = QCOM_SCM_HDCP_INVOKE,
+ .arginfo = QCOM_SCM_ARGS(10),
+ .args = {
+ req[0].addr,
+ req[0].val,
+ req[1].addr,
+ req[1].val,
+ req[2].addr,
+ req[2].val,
+ req[3].addr,
+ req[3].val,
+ req[4].addr,
+ req[4].val
+ },
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ *resp = res.result[0];
+
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
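+
+/*
+ * Illustrative use (a sketch; hdcp_reg_addr is a placeholder, not a
+ * documented HDCP register): writing one register through the secure world.
+ *
+ *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
+ *		{ .addr = hdcp_reg_addr, .val = 0x1 },
+ *	};
+ *	u32 resp;
+ *
+ *	ret = qcom_scm_hdcp_req(req, 1, &resp);
+ *
+ * The descriptor always carries QCOM_SCM_HDCP_MAX_REQ_CNT address/value
+ * pairs, so unused entries are simply left zeroed.
+ */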
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+ .cmd = QCOM_SCM_SMMU_PT_FORMAT,
+ .arginfo = QCOM_SCM_ARGS(3),
+ .args[0] = sec_id,
+ .args[1] = ctx_num,
+ .args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+ .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+ .args[1] = en,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+bool qcom_scm_lmh_dcvsh_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
+
+int qcom_scm_lmh_profile_change(u32 profile_id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ .args[0] = profile_id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version)
+{
+ dma_addr_t payload_phys;
+ u32 *payload_buf;
+ int ret, payload_size = 5 * sizeof(u32);
+
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_LMH,
+ .cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[1] = payload_size,
+ .args[2] = limit_node,
+ .args[3] = node_id,
+ .args[4] = version,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ if (!payload_buf)
+ return -ENOMEM;
+
+ payload_buf[0] = payload_fn;
+ payload_buf[1] = 0;
+ payload_buf[2] = payload_reg;
+ payload_buf[3] = 1;
+ payload_buf[4] = payload_val;
+
+ desc.args[0] = payload_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_is_available);
+
+static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
+{
+ /* FW currently only supports a single wq_ctx (zero).
+ * TODO: Update this logic to include dynamic allocation and lookup of
+ * completion structs when FW supports more wq_ctx values.
+ */
+ if (wq_ctx != 0) {
+ dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
+{
+ int ret;
+
+ ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
+ if (ret)
+ return ret;
+
+ complete(&__scm->waitq_comp);
+
+ return 0;
+}
+
+static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
+{
+ int ret;
+ struct qcom_scm *scm = data;
+ u32 wq_ctx, flags, more_pending = 0;
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
+ goto out;
+ }
+
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
+ flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
+ dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ goto out;
+ }
+
+ ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
+ if (ret)
+ goto out;
+ } while (more_pending);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ int irq, ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&scm->scm_bw_lock);
+
+ scm->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(scm->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
+ "failed to acquire interconnect path\n");
+
+ scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk))
+ return PTR_ERR(scm->core_clk);
+
+ scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk))
+ return PTR_ERR(scm->iface_clk);
+
+ scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk))
+ return PTR_ERR(scm->bus_clk);
+
+ scm->reset.ops = &qcom_scm_pas_reset_ops;
+ scm->reset.nr_resets = 1;
+ scm->reset.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
+
+ /* vote for max clk rate for highest performance */
+ ret = clk_set_rate(scm->core_clk, INT_MAX);
+ if (ret)
+ return ret;
+
+ __scm = scm;
+ __scm->dev = &pdev->dev;
+
+ init_completion(&__scm->waitq_comp);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0)
+ return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ }
+
+ __get_convention();
+
+ /*
+ * If requested, enable "download mode": from this point on, a warm
+ * boot will cause the boot stages to enter download mode, unless it
+ * is disabled again by a clean shutdown/reboot (see below).
+ */
+ if (download_mode)
+ qcom_scm_set_download_mode(true);
+
+ return 0;
+}
+
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+ /* Clean shutdown, disable download mode to allow normal restart */
+ qcom_scm_set_download_mode(false);
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm" },
+
+ /* Legacy entries kept for backwards compatibility */
+ { .compatible = "qcom,scm-apq8064" },
+ { .compatible = "qcom,scm-apq8084" },
+ { .compatible = "qcom,scm-ipq4019" },
+ { .compatible = "qcom,scm-msm8953" },
+ { .compatible = "qcom,scm-msm8974" },
+ { .compatible = "qcom,scm-msm8996" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = qcom_scm_probe,
+ .shutdown = qcom_scm_shutdown,
+};
+
+static int __init qcom_scm_init(void)
+{
+ return platform_driver_register(&qcom_scm_driver);
+}
+subsys_initcall(qcom_scm_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
new file mode 100644
index 0000000000..e6e512bd57
--- /dev/null
+++ b/drivers/firmware/qcom_scm.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef __QCOM_SCM_INT_H
+#define __QCOM_SCM_INT_H
+
+enum qcom_scm_convention {
+ SMC_CONVENTION_UNKNOWN,
+ SMC_CONVENTION_LEGACY,
+ SMC_CONVENTION_ARM_32,
+ SMC_CONVENTION_ARM_64,
+};
+
+extern enum qcom_scm_convention qcom_scm_convention;
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0x3) << 4) | \
+ (((b) & 0x3) << 6) | \
+ (((c) & 0x3) << 8) | \
+ (((d) & 0x3) << 10) | \
+ (((e) & 0x3) << 12) | \
+ (((f) & 0x3) << 14) | \
+ (((g) & 0x3) << 16) | \
+ (((h) & 0x3) << 18) | \
+ (((i) & 0x3) << 20) | \
+ (((j) & 0x3) << 22) | \
+ ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
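+/*
+ * Illustrative encoding (not part of this change): QCOM_SCM_ARGS(2,
+ * QCOM_SCM_VAL, QCOM_SCM_RW) expands to 2 | (QCOM_SCM_VAL << 4) |
+ * (QCOM_SCM_RW << 6), i.e. the low nibble carries the argument count and
+ * each following 2-bit field describes one argument (value, read-only
+ * buffer or read-write buffer). Unspecified arguments default to
+ * QCOM_SCM_VAL (0).
+ */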
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+ u32 svc;
+ u32 cmd;
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u32 owner;
+};
+
+/**
+ * struct qcom_scm_res
+ * @result: The values returned by the secure syscall
+ */
+struct qcom_scm_res {
+ u64 result[MAX_QCOM_SCM_RETS];
+};
+
+int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
+#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic);
+#define scm_smc_call(dev, desc, res, atomic) \
+ __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
+
+#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+
+#define QCOM_SCM_SVC_BOOT 0x01
+#define QCOM_SCM_BOOT_SET_ADDR 0x01
+#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
+#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
+#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11
+#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
+#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
+#define QCOM_SCM_BOOT_MAX_CPUS 4
+#define QCOM_SCM_BOOT_MC_FLAG_AARCH64 BIT(0)
+#define QCOM_SCM_BOOT_MC_FLAG_COLDBOOT BIT(1)
+#define QCOM_SCM_BOOT_MC_FLAG_WARMBOOT BIT(2)
+
+#define QCOM_SCM_SVC_PIL 0x02
+#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01
+#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02
+#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05
+#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06
+#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07
+#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a
+
+#define QCOM_SCM_SVC_IO 0x05
+#define QCOM_SCM_IO_READ 0x01
+#define QCOM_SCM_IO_WRITE 0x02
+
+#define QCOM_SCM_SVC_INFO 0x06
+#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
+
+#define QCOM_SCM_SVC_MP 0x0c
+#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
+#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
+#define QCOM_SCM_MP_VIDEO_VAR 0x08
+#define QCOM_SCM_MP_ASSIGN 0x16
+
+#define QCOM_SCM_SVC_OCMEM 0x0f
+#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
+#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02
+
+#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
+#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
+#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+
+#define QCOM_SCM_SVC_HDCP 0x11
+#define QCOM_SCM_HDCP_INVOKE 0x01
+
+#define QCOM_SCM_SVC_LMH 0x13
+#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01
+#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
+
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_SMMU_PT_FORMAT 0x01
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
+/* common error codes */
+#define QCOM_SCM_V2_EBUSY -12
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
+
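+/* Map firmware SCM error codes onto standard errno values. */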
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_V2_EBUSY:
+ return -EBUSY;
+ }
+ return -EINVAL;
+}
+
+#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644
index 0000000000..a69399a6b7
--- /dev/null
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -0,0 +1,940 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via either an ACPI node (on x86
+ * and select subsets of aarch64), a Device Tree node (on arm), or using
+ * a kernel module (or command line) parameter with the following syntax:
+ *
+ * [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ * or
+ * [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ *
+ * where:
+ * <size> := size of ioport or mmio range
+ * <base> := physical base address of ioport or mmio range
+ * <ctrl_off> := (optional) offset of control register
+ * <data_off> := (optional) offset of data register
+ * <dma_off> := (optional) offset of dma register
+ *
+ * e.g.:
+ * qemu_fw_cfg.ioport=12@0x510:0:1:4 (the default on x86)
+ * or
+ * qemu_fw_cfg.mmio=16@0x9020000:8:0:16 (the default on arm)
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <uapi/linux/qemu_fw_cfg.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/crash_core.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+static void __iomem *fw_cfg_reg_dma;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static void fw_cfg_sel_endianness(u16 key)
+{
+ if (fw_cfg_is_mmio)
+ iowrite16be(key, fw_cfg_reg_ctrl);
+ else
+ iowrite16(key, fw_cfg_reg_ctrl);
+}
+
+#ifdef CONFIG_CRASH_CORE
+static inline bool fw_cfg_dma_enabled(void)
+{
+ return (fw_cfg_rev & FW_CFG_VERSION_DMA) && fw_cfg_reg_dma;
+}
+
+/* qemu fw_cfg device is sync today, but spec says it may become async */
+static void fw_cfg_wait_for_control(struct fw_cfg_dma_access *d)
+{
+ for (;;) {
+ u32 ctrl = be32_to_cpu(READ_ONCE(d->control));
+
+ /* do not reorder the read to d->control */
+ rmb();
+ if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0)
+ return;
+
+ cpu_relax();
+ }
+}
+
+static ssize_t fw_cfg_dma_transfer(void *address, u32 length, u32 control)
+{
+ phys_addr_t dma;
+ struct fw_cfg_dma_access *d = NULL;
+ ssize_t ret = length;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ /* fw_cfg device does not need IOMMU protection, so use physical addresses */
+ *d = (struct fw_cfg_dma_access) {
+ .address = cpu_to_be64(address ? virt_to_phys(address) : 0),
+ .length = cpu_to_be32(length),
+ .control = cpu_to_be32(control)
+ };
+
+ dma = virt_to_phys(d);
+
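+	/* QEMU starts the transfer once the low half of the address is written */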
+ iowrite32be((u64)dma >> 32, fw_cfg_reg_dma);
+ /* force memory to sync before notifying device via MMIO */
+ wmb();
+ iowrite32be(dma, fw_cfg_reg_dma + 4);
+
+ fw_cfg_wait_for_control(d);
+
+	if (be32_to_cpu(READ_ONCE(d->control)) & FW_CFG_DMA_CTL_ERROR)
+		ret = -EIO;
+
+end:
+ kfree(d);
+
+ return ret;
+}
+#endif
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_read_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk = -1U;
+ acpi_status status;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+ memset(buf, 0, count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ fw_cfg_sel_endianness(key);
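+	/* seek to offset 'pos' with dummy reads of the data register */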
+ while (pos-- > 0)
+ ioread8(fw_cfg_reg_data);
+ ioread8_rep(fw_cfg_reg_data, buf, count);
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+ return count;
+}
+
+#ifdef CONFIG_CRASH_CORE
+/* write chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_write_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk = -1U;
+ acpi_status status;
+ ssize_t ret = count;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "%s: Failed to lock ACPI!\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ if (pos == 0) {
+ ret = fw_cfg_dma_transfer(buf, count, key << 16
+ | FW_CFG_DMA_CTL_SELECT
+ | FW_CFG_DMA_CTL_WRITE);
+ } else {
+ fw_cfg_sel_endianness(key);
+ ret = fw_cfg_dma_transfer(NULL, pos, FW_CFG_DMA_CTL_SKIP);
+ if (ret < 0)
+ goto end;
+ ret = fw_cfg_dma_transfer(buf, count, FW_CFG_DMA_CTL_WRITE);
+ }
+
+end:
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+
+ return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+ if (fw_cfg_is_mmio) {
+ iounmap(fw_cfg_dev_base);
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ } else {
+ ioport_unmap(fw_cfg_dev_base);
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ }
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# define FW_CFG_CTRL_OFF 0x08
+# define FW_CFG_DATA_OFF 0x00
+# define FW_CFG_DMA_OFF 0x10
+# elif defined(CONFIG_PARISC) /* parisc */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x04
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# define FW_CFG_DMA_OFF 0x04
+# else
+# error "QEMU FW_CFG not available on this architecture!"
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+ char sig[FW_CFG_SIG_SIZE];
+ struct resource *range, *ctrl, *data, *dma;
+
+ /* acquire i/o range details */
+ fw_cfg_is_mmio = false;
+ range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!range) {
+ fw_cfg_is_mmio = true;
+ range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!range)
+ return -EINVAL;
+ }
+ fw_cfg_p_base = range->start;
+ fw_cfg_p_size = resource_size(range);
+
+ if (fw_cfg_is_mmio) {
+ if (!request_mem_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_mem"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ } else {
+ if (!request_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_io"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ }
+
+ /* were custom register offsets provided (e.g. on the command line)? */
+ ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+ data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+ dma = platform_get_resource_byname(pdev, IORESOURCE_REG, "dma");
+ if (ctrl && data) {
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+ fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+ } else {
+ /* use architecture-specific offsets */
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+ fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+ }
+
+ if (dma)
+ fw_cfg_reg_dma = fw_cfg_dev_base + dma->start;
+#ifdef FW_CFG_DMA_OFF
+ else
+ fw_cfg_reg_dma = fw_cfg_dev_base + FW_CFG_DMA_OFF;
+#endif
+
+ /* verify fw_cfg device signature */
+ if (fw_cfg_read_blob(FW_CFG_SIGNATURE, sig,
+ 0, FW_CFG_SIG_SIZE) < 0 ||
+ memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+ fw_cfg_io_cleanup();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct kobj_attribute fw_cfg_rev_attr = {
+ .attr = { .name = "rev", .mode = S_IRUSR },
+ .show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+ struct kobject kobj;
+ u32 size;
+ u16 select;
+ char name[FW_CFG_MAX_FILE_PATH];
+ struct list_head list;
+};
+
+#ifdef CONFIG_CRASH_CORE
+static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
+{
+ static struct fw_cfg_vmcoreinfo *data;
+ ssize_t ret;
+
+ data = kmalloc(sizeof(struct fw_cfg_vmcoreinfo), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ *data = (struct fw_cfg_vmcoreinfo) {
+ .guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF),
+ .size = cpu_to_le32(VMCOREINFO_NOTE_SIZE),
+ .paddr = cpu_to_le64(paddr_vmcoreinfo_note())
+ };
+	/* spare ourselves reading the host format support for now, since we
+	 * don't know what else to format - the host may ignore ours
+ */
+ ret = fw_cfg_write_blob(be16_to_cpu(f->select), data,
+ 0, sizeof(struct fw_cfg_vmcoreinfo));
+
+ kfree(data);
+ return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_add_tail(&entry->list, &fw_cfg_entry_cache);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_del(&entry->list);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+ struct fw_cfg_sysfs_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+ fw_cfg_sysfs_cache_delist(entry);
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ }
+}
+
+/* per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+ .attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+ .show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%s\n", e->name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+ &fw_cfg_sysfs_attr_size.attr,
+ &fw_cfg_sysfs_attr_key.attr,
+ &fw_cfg_sysfs_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fw_cfg_sysfs_entry);
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+ char *buf)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+ struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+ .show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+ .default_groups = fw_cfg_sysfs_entry_groups,
+ .sysfs_ops = &fw_cfg_sysfs_attr_ops,
+ .release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ if (pos > entry->size)
+ return -EINVAL;
+
+ if (count > entry->size - pos)
+ count = entry->size - pos;
+
+ return fw_cfg_read_blob(entry->select, buf, pos, count);
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+ .attr = { .name = "raw", .mode = S_IRUSR },
+ .read = fw_cfg_sysfs_read_raw,
+};
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; At the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+ struct kobject *target, const char *name)
+{
+ int ret;
+ struct kset *subdir;
+ struct kobject *ko;
+ char *name_copy, *p, *tok;
+
+ if (!dir || !target || !name || !*name)
+ return -EINVAL;
+
+ /* clone a copy of name for parsing */
+ name_copy = p = kstrdup(name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ /* create folders for each dirname token, then symlink for basename */
+ while ((tok = strsep(&p, "/")) && *tok) {
+
+ /* last (basename) token? If so, add symlink here */
+ if (!p || !*p) {
+ ret = sysfs_create_link(&dir->kobj, target, tok);
+ break;
+ }
+
+ /* does the current dir contain an item named after tok ? */
+ ko = kset_find_obj(dir, tok);
+ if (ko) {
+ /* drop reference added by kset_find_obj */
+ kobject_put(ko);
+
+ /* ko MUST be a kset - we're about to use it as one ! */
+ if (ko->ktype != dir->kobj.ktype) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* descend into already existing subdirectory */
+ dir = to_kset(ko);
+ } else {
+ /* create new subdirectory kset */
+ subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+ if (!subdir) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdir->kobj.kset = dir;
+ subdir->kobj.ktype = dir->kobj.ktype;
+ ret = kobject_set_name(&subdir->kobj, "%s", tok);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+ ret = kset_register(subdir);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+
+ /* descend into newly created subdirectory */
+ dir = subdir;
+ }
+ }
+
+ /* we're done with cloned copy of name */
+ kfree(name_copy);
+ return ret;
+}
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+ struct kobject *k, *next;
+
+ list_for_each_entry_safe(k, next, &kset->list, entry)
+ /* all set members are ksets too, but check just in case... */
+ if (k->ktype == kset->kobj.ktype)
+ fw_cfg_kset_unregister_recursive(to_kset(k));
+
+ /* symlinks are cleanly and automatically removed with the directory */
+ kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+ int err;
+ struct fw_cfg_sysfs_entry *entry;
+
+#ifdef CONFIG_CRASH_CORE
+ if (fw_cfg_dma_enabled() &&
+ strcmp(f->name, FW_CFG_VMCOREINFO_FILENAME) == 0 &&
+ !is_kdump_kernel()) {
+ if (fw_cfg_write_vmcoreinfo(f) < 0)
+			pr_warn("fw_cfg: failed to write vmcoreinfo\n");
+ }
+#endif
+
+ /* allocate new entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* set file entry information */
+ entry->size = be32_to_cpu(f->size);
+ entry->select = be16_to_cpu(f->select);
+ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->select);
+ if (err)
+ goto err_put_entry;
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+ if (err)
+ goto err_del_entry;
+
+ /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
+
+ /* success, add entry to global cache */
+ fw_cfg_sysfs_cache_enlist(entry);
+ return 0;
+
+err_del_entry:
+ kobject_del(&entry->kobj);
+err_put_entry:
+ kobject_put(&entry->kobj);
+ return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+ int ret = 0;
+ __be32 files_count;
+ u32 count, i;
+ struct fw_cfg_file *dir;
+ size_t dir_size;
+
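+	/* the file directory starts with a big-endian count of entries,
+	 * followed by the entries themselves
+	 */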
+ ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, &files_count,
+ 0, sizeof(files_count));
+ if (ret < 0)
+ return ret;
+
+ count = be32_to_cpu(files_count);
+ dir_size = count * sizeof(struct fw_cfg_file);
+
+ dir = kmalloc(dir_size, GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, dir,
+ sizeof(files_count), dir_size);
+ if (ret < 0)
+ goto end;
+
+ for (i = 0; i < count; i++) {
+ ret = fw_cfg_register_file(&dir[i]);
+ if (ret)
+ break;
+ }
+
+end:
+ kfree(dir);
+ return ret;
+}
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+ int err;
+ __le32 rev;
+
+ /* NOTE: If we supported multiple fw_cfg devices, we'd first create
+ * a subdirectory named after e.g. pdev->id, then hang per-device
+ * by_key (and by_name) subdirectories underneath it. However, only
+	 * one fw_cfg device exists system-wide, so if one was already found
+ * earlier, we might as well stop here.
+ */
+ if (fw_cfg_sel_ko)
+ return -EBUSY;
+
+ /* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+ err = -ENOMEM;
+ fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+ if (!fw_cfg_sel_ko)
+ goto err_sel;
+ fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+ if (!fw_cfg_fname_kset)
+ goto err_name;
+
+ /* initialize fw_cfg device i/o from platform data */
+ err = fw_cfg_do_platform_probe(pdev);
+ if (err)
+ goto err_probe;
+
+ /* get revision number, add matching top-level attribute */
+ err = fw_cfg_read_blob(FW_CFG_ID, &rev, 0, sizeof(rev));
+ if (err < 0)
+ goto err_probe;
+
+ fw_cfg_rev = le32_to_cpu(rev);
+ err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ if (err)
+ goto err_rev;
+
+ /* process fw_cfg file directory entry, registering each file */
+ err = fw_cfg_register_dir_entries();
+ if (err)
+ goto err_dir;
+
+ /* success */
+ pr_debug("fw_cfg: loaded.\n");
+ return 0;
+
+err_dir:
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+ fw_cfg_io_cleanup();
+err_probe:
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+ return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+ pr_debug("fw_cfg: unloading.\n");
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ fw_cfg_io_cleanup();
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+ return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+ { .compatible = "qemu,fw-cfg-mmio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+ { FW_CFG_ACPI_DEVICE_ID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+ .probe = fw_cfg_sysfs_probe,
+ .remove = fw_cfg_sysfs_remove,
+ .driver = {
+ .name = "fw_cfg",
+ .of_match_table = fw_cfg_sysfs_mmio_match,
+ .acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+ },
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i" \
+ ":%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i%n"
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+ "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u" \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+#define PH_ADDR_PR_4_FMT PH_ADDR_PR_3_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+ struct resource res[4] = {};
+ char *str;
+ phys_addr_t base;
+ resource_size_t size, ctrl_off, data_off, dma_off;
+ int processed, consumed = 0;
+
+ /* only one fw_cfg device can exist system-wide, so if one
+ * was processed on the command line already, we might as
+ * well stop here.
+ */
+ if (fw_cfg_cmdline_dev) {
+ /* avoid leaking previously registered device */
+ platform_device_unregister(fw_cfg_cmdline_dev);
+ return -EINVAL;
+ }
+
+ /* consume "<size>" portion of command line argument */
+ size = memparse(arg, &str);
+
+ /* get "@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]" chunks */
+ processed = sscanf(str, PH_ADDR_SCAN_FMT,
+ &base, &consumed,
+ &ctrl_off, &data_off, &consumed,
+ &dma_off, &consumed);
+
+ /* sscanf() must process precisely 1, 3 or 4 chunks:
+ * <base> is mandatory, optionally followed by <ctrl_off>
+ * and <data_off>, and <dma_off>;
+ * there must be no extra characters after the last chunk,
+ * so str[consumed] must be '\0'.
+ */
+ if (str[consumed] ||
+ (processed != 1 && processed != 3 && processed != 4))
+ return -EINVAL;
+
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+ IORESOURCE_IO;
+
+ /* insert register offsets, if provided */
+ if (processed > 1) {
+ res[1].name = "ctrl";
+ res[1].start = ctrl_off;
+ res[1].flags = IORESOURCE_REG;
+ res[2].name = "data";
+ res[2].start = data_off;
+ res[2].flags = IORESOURCE_REG;
+ }
+ if (processed > 3) {
+ res[3].name = "dma";
+ res[3].start = dma_off;
+ res[3].flags = IORESOURCE_REG;
+ }
+
+ /* "processed" happens to nicely match the number of resources
+ * we need to pass in to this platform device.
+ */
+ fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+ PLATFORM_DEVID_NONE, res, processed);
+
+ return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
+}
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+ /* stay silent if device was not configured via the command
+ * line, or if the parameter name (ioport/mmio) doesn't match
+ * the device setting
+ */
+ if (!fw_cfg_cmdline_dev ||
+ (!strcmp(kp->name, "mmio") ^
+ (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+ return 0;
+
+ switch (fw_cfg_cmdline_dev->num_resources) {
+ case 1:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start);
+ case 3:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start);
+ case 4:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_4_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start,
+ fw_cfg_cmdline_dev->resource[3].start);
+ }
+
+ /* Should never get here */
+ WARN(1, "Unexpected number of resources: %d\n",
+ fw_cfg_cmdline_dev->num_resources);
+ return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+ .set = fw_cfg_cmdline_set,
+ .get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+ int ret;
+
+ /* create /sys/firmware/qemu_fw_cfg/ top level directory */
+ fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+ if (!fw_cfg_top_ko)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&fw_cfg_sysfs_driver);
+ if (ret)
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+ return ret;
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+ platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+ platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+ /* clean up /sys/firmware/qemu_fw_cfg/ */
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
new file mode 100644
index 0000000000..f66efaa519
--- /dev/null
+++ b/drivers/firmware/raspberrypi.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Defines interfaces for interacting with the Raspberry Pi firmware's
+ * property channel.
+ *
+ * Copyright © 2015 Broadcom
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
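+/*
+ * Mailbox messages carry a 16-byte-aligned buffer bus address in the upper
+ * 28 bits and the mailbox channel number in the low 4 bits.
+ */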
+#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
+#define MBOX_CHAN(msg) ((msg) & 0xf)
+#define MBOX_DATA28(msg) ((msg) & ~0xf)
+#define MBOX_CHAN_PROPERTY 8
+
+static struct platform_device *rpi_hwmon;
+static struct platform_device *rpi_clk;
+
+struct rpi_firmware {
+ struct mbox_client cl;
+ struct mbox_chan *chan; /* The property channel. */
+ struct completion c;
+ u32 enabled;
+
+ struct kref consumers;
+};
+
+static DEFINE_MUTEX(transaction_lock);
+
+static void response_callback(struct mbox_client *cl, void *msg)
+{
+	struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
+
+	complete(&fw->c);
+}
+
+/*
+ * Sends a request to the firmware through the BCM2835 mailbox driver,
+ * and synchronously waits for the reply.
+ */
+static int
+rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
+{
+ u32 message = MBOX_MSG(chan, data);
+ int ret;
+
+ WARN_ON(data & 0xf);
+
+ mutex_lock(&transaction_lock);
+ reinit_completion(&fw->c);
+ ret = mbox_send_message(fw->chan, &message);
+ if (ret >= 0) {
+ if (wait_for_completion_timeout(&fw->c, HZ)) {
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ WARN_ONCE(1, "Firmware transaction timeout");
+ }
+ } else {
+ dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
+ }
+ mutex_unlock(&transaction_lock);
+
+ return ret;
+}
+
+/**
+ * rpi_firmware_property_list - Submit firmware property list
+ * @fw: Pointer to firmware structure from rpi_firmware_get().
+ * @data: Buffer holding tags.
+ * @tag_size: Size of tags buffer.
+ *
+ * Submits a set of concatenated tags to the VPU firmware through the
+ * mailbox property interface.
+ *
+ * The buffer header and the ending tag are added by this function and
+ * don't need to be supplied, just the actual tags for your operation.
+ * See struct rpi_firmware_property_tag_header for the per-tag
+ * structure.
+ */
+int rpi_firmware_property_list(struct rpi_firmware *fw,
+ void *data, size_t tag_size)
+{
+ size_t size = tag_size + 12;
+ u32 *buf;
+ dma_addr_t bus_addr;
+ int ret;
+
+ /* Packets are processed a dword at a time. */
+ if (size & 3)
+ return -EINVAL;
+
+ buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+ GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ /* The firmware will error out without parsing in this case. */
+ WARN_ON(size >= 1024 * 1024);
+
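+	/* buffer layout: size, request code, the tags, then the end tag */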
+ buf[0] = size;
+ buf[1] = RPI_FIRMWARE_STATUS_REQUEST;
+ memcpy(&buf[2], data, tag_size);
+ buf[size / 4 - 1] = RPI_FIRMWARE_PROPERTY_END;
+ wmb();
+
+ ret = rpi_firmware_transaction(fw, MBOX_CHAN_PROPERTY, bus_addr);
+
+ rmb();
+ memcpy(data, &buf[2], tag_size);
+ if (ret == 0 && buf[1] != RPI_FIRMWARE_STATUS_SUCCESS) {
+ /*
+ * The tag name here might not be the one causing the
+ * error, if there were multiple tags in the request.
+ * But single-tag is the most common, so go with it.
+ */
+ dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
+ buf[2], buf[1]);
+ ret = -EINVAL;
+ }
+
+ dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property_list);
+
+/**
+ * rpi_firmware_property - Submit single firmware property
+ * @fw: Pointer to firmware structure from rpi_firmware_get().
+ * @tag: One of enum rpi_firmware_property_tag.
+ * @tag_data: Tag data buffer.
+ * @buf_size: Buffer size.
+ *
+ * Submits a single tag to the VPU firmware through the mailbox
+ * property interface.
+ *
+ * This is a convenience wrapper around
+ * rpi_firmware_property_list() to avoid some of the
+ * boilerplate in property calls.
+ */
+int rpi_firmware_property(struct rpi_firmware *fw,
+ u32 tag, void *tag_data, size_t buf_size)
+{
+ struct rpi_firmware_property_tag_header *header;
+ int ret;
+
+ /* Some mailboxes can use over 1k bytes. Rather than checking
+ * size and using stack or kmalloc depending on requirements,
+ * just use kmalloc. Mailboxes don't get called enough to worry
+ * too much about the time taken in the allocation.
+ */
+ void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ header = data;
+ header->tag = tag;
+ header->buf_size = buf_size;
+ header->req_resp_size = 0;
+ memcpy(data + sizeof(*header), tag_data, buf_size);
+
+ ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header));
+
+ memcpy(tag_data, data + sizeof(*header), buf_size);
+
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property);
+
+static void
+rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
+{
+ time64_t date_and_time;
+ u32 packet;
+ int ret = rpi_firmware_property(fw,
+ RPI_FIRMWARE_GET_FIRMWARE_REVISION,
+ &packet, sizeof(packet));
+
+ if (ret)
+ return;
+
+ /* This is not compatible with y2038 */
+ date_and_time = packet;
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
+}
+
+static void
+rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw)
+{
+ u32 packet;
+ int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_THROTTLED,
+ &packet, sizeof(packet));
+
+ if (ret)
+ return;
+
+ rpi_hwmon = platform_device_register_data(dev, "raspberrypi-hwmon",
+ -1, NULL, 0);
+}
+
+static void rpi_register_clk_driver(struct device *dev)
+{
+ struct device_node *firmware;
+
+ /*
+ * Earlier DTs don't have a node for the firmware clocks but
+ * rely on us creating a platform device by hand. If we do
+ * have a node for the firmware clocks, just bail out here.
+ */
+ firmware = of_get_compatible_child(dev->of_node,
+ "raspberrypi,firmware-clocks");
+ if (firmware) {
+ of_node_put(firmware);
+ return;
+ }
+
+ rpi_clk = platform_device_register_data(dev, "raspberrypi-clk",
+ -1, NULL, 0);
+}
+
+unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw, unsigned int id)
+{
+ struct rpi_firmware_clk_rate_request msg =
+ RPI_FIRMWARE_CLK_RATE_REQUEST(id);
+ int ret;
+
+ ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_MAX_CLOCK_RATE,
+ &msg, sizeof(msg));
+ if (ret)
+ /*
+		 * If our firmware doesn't support that operation, or it fails,
+		 * assume the maximum clock rate is the absolute maximum value
+		 * our return type can hold.
+ */
+ return UINT_MAX;
+
+ return le32_to_cpu(msg.rate);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_clk_get_max_rate);
+
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
+static int rpi_firmware_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *fw;
+
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fw->cl.dev = dev;
+ fw->cl.rx_callback = response_callback;
+ fw->cl.tx_block = true;
+
+ fw->chan = mbox_request_channel(&fw->cl, 0);
+ if (IS_ERR(fw->chan)) {
+ int ret = PTR_ERR(fw->chan);
+ kfree(fw);
+ return dev_err_probe(dev, ret, "Failed to get mbox channel\n");
+ }
+
+ init_completion(&fw->c);
+ kref_init(&fw->consumers);
+
+ platform_set_drvdata(pdev, fw);
+
+ rpi_firmware_print_firmware_revision(fw);
+ rpi_register_hwmon_driver(dev, fw);
+ rpi_register_clk_driver(dev);
+
+ return 0;
+}
+
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ if (!fw)
+ return;
+
+ rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
+static int rpi_firmware_remove(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ platform_device_unregister(rpi_hwmon);
+ rpi_hwmon = NULL;
+ platform_device_unregister(rpi_clk);
+ rpi_clk = NULL;
+
+ rpi_firmware_put(fw);
+
+ return 0;
+}
+
+static const struct of_device_id rpi_firmware_of_match[] = {
+ { .compatible = "raspberrypi,bcm2835-firmware", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
+
+struct device_node *rpi_firmware_find_node(void)
+{
+ return of_find_matching_node(NULL, rpi_firmware_of_match);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_find_node);
+
+/**
+ * rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+ struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
+
+ if (!pdev)
+ return NULL;
+
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ goto err_put_device;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ goto err_put_device;
+
+ put_device(&pdev->dev);
+
+ return fw;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_get);
+
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @dev: Device that requests the firmware handle; the reference is
+ *       released automatically when this device is unbound.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
+static struct platform_driver rpi_firmware_driver = {
+ .driver = {
+ .name = "raspberrypi-firmware",
+ .of_match_table = rpi_firmware_of_match,
+ },
+ .probe = rpi_firmware_probe,
+ .shutdown = rpi_firmware_shutdown,
+ .remove = rpi_firmware_remove,
+};
+module_platform_driver(rpi_firmware_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi firmware driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
new file mode 100644
index 0000000000..2231e6dd20
--- /dev/null
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SCPI Generic power domain support.
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_pm_domain {
+ struct generic_pm_domain genpd;
+ struct scpi_ops *ops;
+ u32 domain;
+};
+
+/*
+ * These device power state values are not well-defined in the specification.
+ * In case different implementations use different values, we can make
+ * these specific to compatibles rather than getting the values from the
+ * device tree.
+ */
+enum scpi_power_domain_state {
+ SCPI_PD_STATE_ON = 0,
+ SCPI_PD_STATE_OFF = 3,
+};
+
+#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
+
+static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
+{
+ int ret;
+ enum scpi_power_domain_state state;
+
+ if (power_on)
+ state = SCPI_PD_STATE_ON;
+ else
+ state = SCPI_PD_STATE_OFF;
+
+ ret = pd->ops->device_set_power_state(pd->domain, state);
+ if (ret)
+ return ret;
+
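+	/* read back to confirm the domain reached the requested state */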
+ return !(state == pd->ops->device_get_power_state(pd->domain));
+}
+
+static int scpi_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, true);
+}
+
+static int scpi_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, false);
+}
+
+static int scpi_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_pm_domain *scpi_pd;
+ struct genpd_onecell_data *scpi_pd_data;
+ struct generic_pm_domain **domains;
+ struct scpi_ops *scpi_ops;
+ int ret, num_domains, i;
+
+ scpi_ops = get_scpi_ops();
+ if (!scpi_ops)
+ return -EPROBE_DEFER;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ if (!scpi_ops->device_set_power_state ||
+ !scpi_ops->device_get_power_state) {
+ dev_err(dev, "power domains not supported in the firmware\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "num-domains", &num_domains);
+ if (ret) {
+ dev_err(dev, "number of domains not found\n");
+ return -EINVAL;
+ }
+
+ scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
+ if (!scpi_pd)
+ return -ENOMEM;
+
+ scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
+ if (!scpi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scpi_pd++) {
+ domains[i] = &scpi_pd->genpd;
+
+ scpi_pd->domain = i;
+ scpi_pd->ops = scpi_ops;
+ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%pOFn.%d", np, i);
+ if (!scpi_pd->genpd.name) {
+ dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+ np, i);
+ continue;
+ }
+ scpi_pd->genpd.power_off = scpi_pd_power_off;
+ scpi_pd->genpd.power_on = scpi_pd_power_on;
+
+ /*
+ * Treat all power domains as off at boot.
+ *
+ * The SCP firmware itself may have switched on some domains,
+ * but for reference counting purpose, keep it this way.
+ */
+ pm_genpd_init(&scpi_pd->genpd, NULL, true);
+ }
+
+ scpi_pd_data->domains = domains;
+ scpi_pd_data->num_domains = num_domains;
+
+ of_genpd_add_provider_onecell(np, scpi_pd_data);
+
+ return 0;
+}
+
+static const struct of_device_id scpi_power_domain_ids[] = {
+ { .compatible = "arm,scpi-power-domains", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
+
+static struct platform_driver scpi_power_domain_driver = {
+ .driver = {
+ .name = "scpi_power_domain",
+ .of_match_table = scpi_power_domain_ids,
+ },
+ .probe = scpi_pm_domain_probe,
+};
+module_platform_driver(scpi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
new file mode 100644
index 0000000000..15e7466179
--- /dev/null
+++ b/drivers/firmware/smccc/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARM_SMCCC
+ bool
+ help
+ Include support for the Secure Monitor Call (SMC) and Hypervisor
+ Call (HVC) instructions on Armv7 and above architectures.
+
+config HAVE_ARM_SMCCC_DISCOVERY
+ bool
+ depends on ARM_PSCI_FW
+ default y
+ help
+	  SMCCC v1.0 lacked discoverability and hence PSCI v1.0 was updated
+	  to add an SMCCC discovery mechanism through the PSCI firmware
+	  implementation of PSCI_FEATURES(SMCCC_VERSION), which returns
+	  success on firmware compliant with SMCCC v1.1 and above.
+
+config ARM_SMCCC_SOC_ID
+ bool "SoC bus device for the ARM SMCCC SOC_ID"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default y
+ select SOC_BUS
+ help
+	  Include support for the SoC bus on ARM SMCCC firmware based
+	  platforms, providing some sysfs information about the SoC variant.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
new file mode 100644
index 0000000000..40d19144a8
--- /dev/null
+++ b/drivers/firmware/smccc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o
+obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
new file mode 100644
index 0000000000..89a68e7eea
--- /dev/null
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "smccc: KVM: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/hypervisor.h>
+
+static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
+
+void __init kvm_init_hyp_services(void)
+{
+ struct arm_smccc_res res;
+ u32 val[4];
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return;
+
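+	/* query the vendor hypervisor UID and bail out unless it is KVM's */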
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
+ res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
+ res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
+ res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ return;
+
+ memset(&res, 0, sizeof(res));
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
+
+ val[0] = lower_32_bits(res.a0);
+ val[1] = lower_32_bits(res.a1);
+ val[2] = lower_32_bits(res.a2);
+ val[3] = lower_32_bits(res.a3);
+
+ bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS);
+
+ pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n",
+ res.a3, res.a2, res.a1, res.a0);
+}
+
+bool kvm_arm_hyp_service_available(u32 func_id)
+{
+ if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS)
+ return false;
+
+ return test_bit(func_id, __kvm_arm_hyp_services);
+}
+EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
new file mode 100644
index 0000000000..db818f9dcb
--- /dev/null
+++ b/drivers/firmware/smccc/smccc.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "smccc: " fmt
+
+#include <linux/cache.h>
+#include <linux/init.h>
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <asm/archrandom.h>
+
+static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
+static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+
+bool __ro_after_init smccc_trng_available = false;
+u64 __ro_after_init smccc_has_sve_hint = false;
+s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
+s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+{
+ struct arm_smccc_res res;
+
+ smccc_version = version;
+ smccc_conduit = conduit;
+
+ smccc_trng_available = smccc_probe_trng();
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ smccc_version >= ARM_SMCCC_VERSION_1_3)
+ smccc_has_sve_hint = true;
+
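+	/*
+	 * SMCCC v1.2 introduced ARCH_SOC_ID; probe for it via ARCH_FEATURES
+	 * and cache the SoC ID version and revision words for later queries.
+	 */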
+ if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
+ (smccc_conduit != SMCCC_CONDUIT_NONE)) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+ if ((s32)res.a0 >= 0) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+ smccc_soc_id_version = (s32)res.a0;
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+ smccc_soc_id_revision = (s32)res.a0;
+ }
+ }
+}
+
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (smccc_version < ARM_SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return smccc_conduit;
+}
+EXPORT_SYMBOL_GPL(arm_smccc_1_1_get_conduit);
+
+u32 arm_smccc_get_version(void)
+{
+ return smccc_version;
+}
+EXPORT_SYMBOL_GPL(arm_smccc_get_version);
+
+s32 arm_smccc_get_soc_id_version(void)
+{
+ return smccc_soc_id_version;
+}
+
+s32 arm_smccc_get_soc_id_revision(void)
+{
+ return smccc_soc_id_revision;
+}
+
+static int __init smccc_devices_init(void)
+{
+ struct platform_device *pdev;
+
+ if (smccc_trng_available) {
+ pdev = platform_device_register_simple("smccc_trng", -1,
+ NULL, 0);
+ if (IS_ERR(pdev))
+ pr_err("smccc_trng: could not register device: %ld\n",
+ PTR_ERR(pdev));
+ }
+
+ return 0;
+}
+device_initcall(smccc_devices_init);
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
new file mode 100644
index 0000000000..1990263fbb
--- /dev/null
+++ b/drivers/firmware/smccc/soc_id.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24)
+/*
+ * As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C)
+ * Section 7.4 SMCCC_ARCH_SOC_ID bits[23:16] are JEP-106 identification
+ * code with parity bit for the SiP. We can drop the parity bit.
+ */
+#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16)
+#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0)
+
+#define JEP106_BANK_CONT_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x)))
+#define JEP106_ID_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x)))
+#define IMP_DEF_SOC_ID(x) \
+ (u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x)))
+
+static struct soc_device *soc_dev;
+static struct soc_device_attribute *soc_dev_attr;
+
+static int __init smccc_soc_init(void)
+{
+ int soc_id_rev, soc_id_version;
+ static char soc_id_str[20], soc_id_rev_str[12];
+ static char soc_id_jep106_id_str[12];
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ soc_id_version = arm_smccc_get_soc_id_version();
+ if (soc_id_version == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ return 0;
+ }
+
+ if (soc_id_version < 0) {
+ pr_err("Invalid SoC Version: %x\n", soc_id_version);
+ return -EINVAL;
+ }
+
+ soc_id_rev = arm_smccc_get_soc_id_revision();
+ if (soc_id_rev < 0) {
+ pr_err("Invalid SoC Revision: %x\n", soc_id_rev);
+ return -EINVAL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
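+	/* expose the SoC as "jep106:<bank><code>:<soc-id>" via the SoC bus */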
+ sprintf(soc_id_rev_str, "0x%08x", soc_id_rev);
+ sprintf(soc_id_jep106_id_str, "jep106:%02x%02x",
+ JEP106_BANK_CONT_CODE(soc_id_version),
+ JEP106_ID_CODE(soc_id_version));
+ sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str,
+ IMP_DEF_SOC_ID(soc_id_version));
+
+ soc_dev_attr->soc_id = soc_id_str;
+ soc_dev_attr->revision = soc_id_rev_str;
+ soc_dev_attr->family = soc_id_jep106_id_str;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
+ return 0;
+}
+module_init(smccc_soc_init);
+
+static void __exit smccc_soc_exit(void)
+{
+ if (soc_dev)
+ soc_device_unregister(soc_dev);
+ kfree(soc_dev_attr);
+}
+module_exit(smccc_soc_exit);
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 0000000000..4f7a7abada
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#define RSU_STATE_MASK GENMASK_ULL(31, 0)
+#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
+#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
+#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_STATUS_MASK GENMASK_ULL(15, 0)
+#define RSU_DCMF1_STATUS_MASK GENMASK_ULL(31, 16)
+#define RSU_DCMF2_STATUS_MASK GENMASK_ULL(47, 32)
+#define RSU_DCMF3_STATUS_MASK GENMASK_ULL(63, 48)
+
+#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
+
+#define INVALID_RETRY_COUNTER 0xFF
+#define INVALID_DCMF_VERSION 0xFF
+#define INVALID_DCMF_STATUS 0xFFFFFFFF
+#define INVALID_SPT_ADDRESS 0x0
+
+#define RSU_GET_SPT_CMD 0x5A
+#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int))
+
+typedef void (*rsu_callback)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data);
+/**
+ * struct stratix10_rsu_priv - rsu data structure
+ * @chan: pointer to the allocated service channel
+ * @client: active service client
+ * @completion: state for callback completion
+ * @lock: a mutex to protect callback completion state
+ * @status.current_image: address of image currently running in flash
+ * @status.fail_image: address of failed image in flash
+ * @status.version: the interface version number of RSU firmware
+ * @status.state: the state of RSU system
+ * @status.error_details: error code
+ * @status.error_location: the error offset inside the image that failed
+ * @dcmf_version.dcmf0: Quartus dcmf0 version
+ * @dcmf_version.dcmf1: Quartus dcmf1 version
+ * @dcmf_version.dcmf2: Quartus dcmf2 version
+ * @dcmf_version.dcmf3: Quartus dcmf3 version
+ * @dcmf_status.dcmf0: dcmf0 status
+ * @dcmf_status.dcmf1: dcmf1 status
+ * @dcmf_status.dcmf2: dcmf2 status
+ * @dcmf_status.dcmf3: dcmf3 status
+ * @retry_counter: the current image's retry counter
+ * @max_retry: the preset max retry value
+ * @spt0_address: address of spt0
+ * @spt1_address: address of spt1
+ * @get_spt_response_buf: response from sdm for get_spt command
+ */
+struct stratix10_rsu_priv {
+ struct stratix10_svc_chan *chan;
+ struct stratix10_svc_client client;
+ struct completion completion;
+ struct mutex lock;
+ struct {
+ unsigned long current_image;
+ unsigned long fail_image;
+ unsigned int version;
+ unsigned int state;
+ unsigned int error_details;
+ unsigned int error_location;
+ } status;
+
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_version;
+
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_status;
+
+ unsigned int retry_counter;
+ unsigned int max_retry;
+
+ unsigned long spt0_address;
+ unsigned long spt1_address;
+
+ unsigned int *get_spt_response_buf;
+};
+
+/**
+ * rsu_status_callback() - Status callback from Intel Service Layer
+ * @client: pointer to service client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU status request. Status is
+ * only updated after a system reboot, so a get updated status call is
+ * made during driver probe.
+ */
+static void rsu_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->status.version = FIELD_GET(RSU_VERSION_MASK,
+ res->a2);
+ priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
+ priv->status.fail_image = res->a1;
+ priv->status.current_image = res->a0;
+ priv->status.error_location =
+ FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
+ priv->status.error_details =
+ FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
+ } else {
+ dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
+ res->a0);
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->status.fail_image = 0;
+ priv->status.current_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ }
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_command_callback() - Update callback from Intel Service Layer
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU commands.
+ */
+static void rsu_command_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_retry_callback() - Callback from Intel service layer for getting
+ * the current image's retry counter from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from the Intel service layer for the retry counter, which tells
+ * the user how many times the current image is still allowed to reload
+ * itself before giving up and starting the RSU fail-over flow.
+ */
+static void rsu_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *counter = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
+ else
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_max_retry_callback() - Callback from Intel service layer for getting
+ * the max retry value from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for max retry.
+ */
+static void rsu_max_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *max_retry = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->max_retry = *max_retry;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support max retry\n");
+ else
+ dev_err(client->dev, "Failed to get max retry %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_dcmf_version_callback() - Callback from Intel service layer for getting
+ * the DCMF version
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF version number
+ */
+static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value1 = (unsigned long long *)data->kaddr1;
+ unsigned long long *value2 = (unsigned long long *)data->kaddr2;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_version.dcmf0 = FIELD_GET(RSU_DCMF0_MASK, *value1);
+ priv->dcmf_version.dcmf1 = FIELD_GET(RSU_DCMF1_MASK, *value1);
+ priv->dcmf_version.dcmf2 = FIELD_GET(RSU_DCMF2_MASK, *value2);
+ priv->dcmf_version.dcmf3 = FIELD_GET(RSU_DCMF3_MASK, *value2);
+ } else
+ dev_err(client->dev, "failed to get DCMF version\n");
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_dcmf_status_callback() - Callback from Intel service layer for getting
+ * the DCMF status
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF status
+ */
+static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value = (unsigned long long *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_status.dcmf0 = FIELD_GET(RSU_DCMF0_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf1 = FIELD_GET(RSU_DCMF1_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf2 = FIELD_GET(RSU_DCMF2_STATUS_MASK,
+ *value);
+ priv->dcmf_status.dcmf3 = FIELD_GET(RSU_DCMF3_STATUS_MASK,
+ *value);
+ } else
+ dev_err(client->dev, "failed to get DCMF status\n");
+
+ complete(&priv->completion);
+}
+
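+/**
+ * rsu_get_spt_callback() - Callback from Intel service layer for getting
+ * the SPT addresses
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from the Intel service layer for the mailbox command that reads
+ * the SPT addresses. On success, the two addresses are extracted from the
+ * response buffer and stored in the driver's private data.
+ */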
+static void rsu_get_spt_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long *mbox_err = (unsigned long *)data->kaddr1;
+ unsigned long *resp_len = (unsigned long *)data->kaddr2;
+
+ if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) ||
+ (*resp_len != RSU_GET_SPT_RESP_LEN))
+ goto error;
+
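+ /* each SPT address is returned as two 32-bit words, high word first */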
+ priv->spt0_address = priv->get_spt_response_buf[0];
+ priv->spt0_address <<= 32;
+ priv->spt0_address |= priv->get_spt_response_buf[1];
+
+ priv->spt1_address = priv->get_spt_response_buf[2];
+ priv->spt1_address <<= 32;
+ priv->spt1_address |= priv->get_spt_response_buf[3];
+
+ goto complete;
+
+error:
+ dev_err(client->dev, "failed to get SPTs\n");
+
+complete:
+ stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf);
+ priv->get_spt_response_buf = NULL;
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_send_msg() - send a message to Intel service layer
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, the bitstream address or notify status
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Start an Intel service layer transaction to perform the SMC call that
+ * is necessary to get RSU boot log or set the address of bitstream to
+ * boot after reboot.
+ *
+ * Return: 0 on success, or a negative error code (e.g. -ETIMEDOUT) on failure.
+ */
+static int rsu_send_msg(struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_callback callback)
+{
+ struct stratix10_svc_client_msg msg;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ reinit_completion(&priv->completion);
+ priv->client.receive_cb = callback;
+
+ msg.command = command;
+ if (arg)
+ msg.arg[0] = arg;
+
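+ /*
+ * The mailbox command (used here to fetch the SPT addresses) returns its
+ * data through a separate response buffer rather than the SMC result
+ * registers, so wire up the output payload for that case.
+ */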
+ if (command == COMMAND_MBOX_SEND_CMD) {
+ msg.arg[1] = 0;
+ msg.payload = NULL;
+ msg.payload_length = 0;
+ msg.payload_output = priv->get_spt_response_buf;
+ msg.payload_length_output = RSU_GET_SPT_RESP_LEN;
+ }
+
+ ret = stratix10_svc_send(priv->chan, &msg);
+ if (ret < 0)
+ goto status_done;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->completion,
+ RSU_TIMEOUT);
+ if (!ret) {
+ dev_err(priv->client.dev,
+ "timeout waiting for SMC call\n");
+ ret = -ETIMEDOUT;
+ goto status_done;
+ } else if (ret < 0) {
+ dev_err(priv->client.dev,
+ "error %d waiting for SMC call\n", ret);
+ goto status_done;
+ } else {
+ ret = 0;
+ }
+
+status_done:
+ stratix10_svc_done(priv->chan);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
+ * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
+ * related. They allow user space software to query the configuration system
+ * status and to request optional reboot behavior specific to Intel FPGAs.
+ */
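+
+/*
+ * Example usage from user space (the sysfs path is illustrative; it depends
+ * on how the platform device is named on a given system):
+ *
+ *   cat /sys/devices/platform/stratix10-rsu.0/current_image
+ *   echo <bitstream address> > /sys/devices/platform/stratix10-rsu.0/reboot_image
+ */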
+
+static ssize_t current_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.current_image);
+}
+
+static ssize_t fail_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.version);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.state);
+}
+
+static ssize_t error_location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_location);
+}
+
+static ssize_t error_details_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_details);
+}
+
+static ssize_t retry_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->retry_counter);
+}
+
+static ssize_t max_retry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->max_retry);
+}
+
+static ssize_t dcmf0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf0);
+}
+
+static ssize_t dcmf1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf1);
+}
+
+static ssize_t dcmf2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf2);
+}
+
+static ssize_t dcmf3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
+}
+
+static ssize_t dcmf0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf0 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf0);
+}
+
+static ssize_t dcmf1_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf1 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf1);
+}
+
+static ssize_t dcmf2_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf2 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf2);
+}
+
+static ssize_t dcmf3_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->dcmf_status.dcmf3 == INVALID_DCMF_STATUS)
+ return -EIO;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf3);
+}
+
+static ssize_t reboot_image_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long address;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &address);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
+ address, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU update returned %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t notify_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long status;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &status);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
+ status, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU notify returned %i\n", ret);
+ return ret;
+ }
+
+ /* to get the updated state */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ return ret;
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t spt0_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->spt0_address == INVALID_SPT_ADDRESS)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address);
+}
+
+static ssize_t spt1_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->spt1_address == INVALID_SPT_ADDRESS)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address);
+}
+
+static DEVICE_ATTR_RO(current_image);
+static DEVICE_ATTR_RO(fail_image);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(error_location);
+static DEVICE_ATTR_RO(error_details);
+static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_RO(max_retry);
+static DEVICE_ATTR_RO(dcmf0);
+static DEVICE_ATTR_RO(dcmf1);
+static DEVICE_ATTR_RO(dcmf2);
+static DEVICE_ATTR_RO(dcmf3);
+static DEVICE_ATTR_RO(dcmf0_status);
+static DEVICE_ATTR_RO(dcmf1_status);
+static DEVICE_ATTR_RO(dcmf2_status);
+static DEVICE_ATTR_RO(dcmf3_status);
+static DEVICE_ATTR_WO(reboot_image);
+static DEVICE_ATTR_WO(notify);
+static DEVICE_ATTR_RO(spt0_address);
+static DEVICE_ATTR_RO(spt1_address);
+
+static struct attribute *rsu_attrs[] = {
+ &dev_attr_current_image.attr,
+ &dev_attr_fail_image.attr,
+ &dev_attr_state.attr,
+ &dev_attr_version.attr,
+ &dev_attr_error_location.attr,
+ &dev_attr_error_details.attr,
+ &dev_attr_retry_counter.attr,
+ &dev_attr_max_retry.attr,
+ &dev_attr_dcmf0.attr,
+ &dev_attr_dcmf1.attr,
+ &dev_attr_dcmf2.attr,
+ &dev_attr_dcmf3.attr,
+ &dev_attr_dcmf0_status.attr,
+ &dev_attr_dcmf1_status.attr,
+ &dev_attr_dcmf2_status.attr,
+ &dev_attr_dcmf3_status.attr,
+ &dev_attr_reboot_image.attr,
+ &dev_attr_notify.attr,
+ &dev_attr_spt0_address.attr,
+ &dev_attr_spt1_address.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rsu);
+
+static int stratix10_rsu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_rsu_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client.dev = dev;
+ priv->client.receive_cb = NULL;
+ priv->client.priv = priv;
+ priv->status.current_image = 0;
+ priv->status.fail_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->retry_counter = INVALID_RETRY_COUNTER;
+ priv->dcmf_version.dcmf0 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
+ priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS;
+ priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS;
+ priv->max_retry = INVALID_RETRY_COUNTER;
+ priv->spt0_address = INVALID_SPT_ADDRESS;
+ priv->spt1_address = INVALID_SPT_ADDRESS;
+
+ mutex_init(&priv->lock);
+ priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+ SVC_CLIENT_RSU);
+ if (IS_ERR(priv->chan)) {
+ dev_err(dev, "couldn't get service channel %s\n",
+ SVC_CLIENT_RSU);
+ return PTR_ERR(priv->chan);
+ }
+
+ init_completion(&priv->completion);
+ platform_set_drvdata(pdev, priv);
+
+ /* get the initial state from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ /* get DCMF version from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_VERSION,
+ 0, rsu_dcmf_version_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF version %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_STATUS,
+ 0, rsu_dcmf_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
+ rsu_max_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU max retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
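+ /* allocate a response buffer for the mailbox command that reads the SPTs */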
+ priv->get_spt_response_buf =
+ stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN);
+
+ if (IS_ERR(priv->get_spt_response_buf)) {
+ dev_err(dev, "failed to allocate get spt buffer\n");
+ } else {
+ ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD,
+ RSU_GET_SPT_CMD, rsu_get_spt_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting SPT table %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+ }
+
+ return ret;
+}
+
+static int stratix10_rsu_remove(struct platform_device *pdev)
+{
+ struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
+
+ stratix10_svc_free_channel(priv->chan);
+ return 0;
+}
+
+static struct platform_driver stratix10_rsu_driver = {
+ .probe = stratix10_rsu_probe,
+ .remove = stratix10_rsu_remove,
+ .driver = {
+ .name = "stratix10-rsu",
+ .dev_groups = rsu_groups,
+ },
+};
+
+module_platform_driver(stratix10_rsu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Remote System Update Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
new file mode 100644
index 0000000000..c693da60e9
--- /dev/null
+++ b/drivers/firmware/stratix10-svc.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/types.h>
+
+/**
+ * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO
+ *
+ * SVC_NUM_CHANNEL - number of channel supported by service layer driver
+ *
+ * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - claim back the submitted buffer(s)
+ * from the secure world for the FPGA manager to reuse, or free the buffer(s)
+ * when all bit-stream data has been sent.
+ *
+ * FPGA_CONFIG_STATUS_TIMEOUT_SEC - timeout for polling the FPGA configuration
+ * status; the service layer returns an error to the FPGA manager when the
+ * timeout occurs. It is set to 30 seconds on the Intel Stratix10 SoC.
+ */
+#define SVC_NUM_DATA_IN_FIFO 32
+#define SVC_NUM_CHANNEL 3
+#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
+#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
+#define BYTE_TO_WORD_SIZE 4
+
+/* stratix10 service layer clients */
+#define STRATIX10_RSU "stratix10-rsu"
+#define INTEL_FCS "intel-fcs"
+
+typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+struct stratix10_svc_chan;
+
+/**
+ * struct stratix10_svc - svc private data
+ * @stratix10_svc_rsu: pointer to the stratix10 RSU platform device
+ * @intel_svc_fcs: pointer to the Intel FCS platform device
+ */
+struct stratix10_svc {
+ struct platform_device *stratix10_svc_rsu;
+ struct platform_device *intel_svc_fcs;
+};
+
+/**
+ * struct stratix10_svc_sh_memory - service shared memory structure
+ * @sync_complete: state for a completion
+ * @addr: physical address of shared memory block
+ * @size: size of shared memory block
+ * @invoke_fn: function to issue secure monitor or hypervisor call
+ *
+ * This struct is used to save the physical address and size of the shared
+ * memory block. The shared memory block is allocated by the secure monitor
+ * software in the secure world.
+ *
+ * Service layer driver uses the physical address and size to create a memory
+ * pool, then allocates data buffer from that memory pool for service client.
+ */
+struct stratix10_svc_sh_memory {
+ struct completion sync_complete;
+ unsigned long addr;
+ unsigned long size;
+ svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_data_mem - service memory structure
+ * @vaddr: virtual address
+ * @paddr: physical address
+ * @size: size of memory
+ * @node: link list head node
+ *
+ * This struct is used in a list that keeps track of buffers which have
+ * been allocated or freed from the memory pool. Service layer driver also
+ * uses this struct to transfer physical address to virtual address.
+ */
+struct stratix10_svc_data_mem {
+ void *vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ struct list_head node;
+};
+
+/**
+ * struct stratix10_svc_data - service data structure
+ * @chan: service channel
+ * @paddr: physical address of the payload to be processed
+ * @size: size of the payload to be processed
+ * @paddr_output: physical address of processed payload
+ * @size_output: processed payload size
+ * @command: service command requested by client
+ * @flag: configuration type (full or partial)
+ * @arg: args to be passed via registers and not physically mapped buffers
+ *
+ * This struct is used in service FIFO for inter-process communication.
+ */
+struct stratix10_svc_data {
+ struct stratix10_svc_chan *chan;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t paddr_output;
+ size_t size_output;
+ u32 command;
+ u32 flag;
+ u64 arg[3];
+};
+
+/**
+ * struct stratix10_svc_controller - service controller
+ * @dev: device
+ * @chans: array of service channels
+ * @num_chans: number of channels in 'chans' array
+ * @num_active_client: number of active service clients
+ * @node: list management
+ * @genpool: memory pool pointing to the memory region
+ * @task: pointer to the thread task which handles SMC or HVC call
+ * @svc_fifo: a queue for storing service message data
+ * @complete_status: state for completion
+ * @svc_fifo_lock: protect access to service message data queue
+ * @invoke_fn: function to issue secure monitor call or hypervisor call
+ *
+ * This struct is used to create communication channels for service clients and
+ * to handle secure monitor or hypervisor calls.
+ */
+struct stratix10_svc_controller {
+ struct device *dev;
+ struct stratix10_svc_chan *chans;
+ int num_chans;
+ int num_active_client;
+ struct list_head node;
+ struct gen_pool *genpool;
+ struct task_struct *task;
+ struct kfifo svc_fifo;
+ struct completion complete_status;
+ spinlock_t svc_fifo_lock;
+ svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_chan - service communication channel
+ * @ctrl: pointer to service controller which is the provider of this channel
+ * @scl: pointer to service client which owns the channel
+ * @name: service client name associated with the channel
+ * @lock: protect access to the channel
+ *
+ * This struct is used by a service client to communicate with the service
+ * layer; each service client has its own channel created by the service
+ * controller.
+ */
+struct stratix10_svc_chan {
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_svc_client *scl;
+ char *name;
+ spinlock_t lock;
+};
+
+static LIST_HEAD(svc_ctrl);
+static LIST_HEAD(svc_data_mem);
+
+/**
+ * svc_pa_to_va() - translate physical address to virtual address
+ * @addr: physical address to be translated
+ *
+ * Return: valid virtual address or NULL if the provided physical
+ * address doesn't exist.
+ */
+static void *svc_pa_to_va(unsigned long addr)
+{
+ struct stratix10_svc_data_mem *pmem;
+
+ pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
+ list_for_each_entry(pmem, &svc_data_mem, node)
+ if (pmem->paddr == addr)
+ return pmem->vaddr;
+
+ /* physical address is not found */
+ return NULL;
+}
+
+/**
+ * svc_thread_cmd_data_claim() - claim back buffer from the secure world
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Claim back the submitted buffers from the secure world and pass buffer
+ * back to service client (FPGA manager, etc) for reuse.
+ */
+static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
+ struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data)
+{
+ struct arm_smccc_res res;
+ unsigned long timeout;
+
+ reinit_completion(&ctrl->complete_status);
+ timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS);
+
+ pr_debug("%s: claim back the submitted buffer\n", __func__);
+ do {
+ ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ if (!res.a1) {
+ complete(&ctrl->complete_status);
+ break;
+ }
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
+ cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ?
+ svc_pa_to_va(res.a3) : NULL;
+ p_data->chan->scl->receive_cb(p_data->chan->scl,
+ cb_data);
+ } else {
+ pr_debug("%s: secure world busy, polling again\n",
+ __func__);
+ }
+ } while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
+ wait_for_completion_timeout(&ctrl->complete_status, timeout));
+}
+
+/**
+ * svc_thread_cmd_config_status() - check configuration status
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Check whether the secure firmware at secure world has finished the FPGA
+ * configuration, and then inform FPGA manager the configuration status.
+ */
+static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
+ struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data)
+{
+ struct arm_smccc_res res;
+ int count_in_sec;
+ unsigned long a0, a1, a2;
+
+ cb_data->kaddr1 = NULL;
+ cb_data->kaddr2 = NULL;
+ cb_data->kaddr3 = NULL;
+ cb_data->status = BIT(SVC_STATUS_ERROR);
+
+ pr_debug("%s: polling config status\n", __func__);
+
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+ a1 = (unsigned long)p_data->paddr;
+ a2 = (unsigned long)p_data->size;
+
+ if (p_data->command == COMMAND_POLL_SERVICE_STATUS)
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+
+ count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
+ while (count_in_sec) {
+ ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+ if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR) ||
+ (res.a0 == INTEL_SIP_SMC_STATUS_REJECTED))
+ break;
+
+ /*
+ * request is still in progress, wait one second then
+ * poll again
+ */
+ msleep(1000);
+ count_in_sec--;
+ }
+
+ if (!count_in_sec) {
+ pr_err("%s: poll status timeout\n", __func__);
+ cb_data->status = BIT(SVC_STATUS_BUSY);
+ } else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ } else {
+ pr_err("%s: poll status error\n", __func__);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ cb_data->status = BIT(SVC_STATUS_ERROR);
+ }
+
+ p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_thread_recv_status_ok() - handle the successful status
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ * @res: result from SMC or HVC call
+ *
+ * Send back the corresponding status to the service clients.
+ */
+static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data,
+ struct arm_smccc_res res)
+{
+ cb_data->kaddr1 = NULL;
+ cb_data->kaddr2 = NULL;
+ cb_data->kaddr3 = NULL;
+
+ switch (p_data->command) {
+ case COMMAND_RECONFIG:
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ break;
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
+ break;
+ case COMMAND_RSU_RETRY:
+ case COMMAND_RSU_MAX_RETRY:
+ case COMMAND_RSU_DCMF_STATUS:
+ case COMMAND_FIRMWARE_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ break;
+ case COMMAND_SMC_SVC_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_POLL_SERVICE_STATUS:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = svc_pa_to_va(res.a2);
+ cb_data->kaddr3 = &res.a3;
+ break;
+ case COMMAND_MBOX_SEND_CMD:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ /* SDM return size in u8. Convert size to u32 word */
+ res.a2 = res.a2 * BYTE_TO_WORD_SIZE;
+ cb_data->kaddr2 = &res.a2;
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+
+ pr_debug("%s: call receive_cb\n", __func__);
+ p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_normal_to_secure_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates the svc_smc_hvc_thread kthread on CPU
+ * node 0; that thread runs this function to handle SMC or HVC calls between
+ * the kernel driver and the secure monitor software.
+ *
+ * Return: 0 for success or -ENOMEM on error.
+ */
+static int svc_normal_to_secure_thread(void *data)
+{
+ struct stratix10_svc_controller
+ *ctrl = (struct stratix10_svc_controller *)data;
+ struct stratix10_svc_data *pdata;
+ struct stratix10_svc_cb_data *cbdata;
+ struct arm_smccc_res res;
+ unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
+ int ret_fifo = 0;
+
+ pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL);
+ if (!cbdata) {
+ kfree(pdata);
+ return -ENOMEM;
+ }
+
+ /* default set, to remove build warning */
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
+ a1 = 0;
+ a2 = 0;
+ a3 = 0;
+ a4 = 0;
+ a5 = 0;
+ a6 = 0;
+ a7 = 0;
+
+ pr_debug("smc_hvc_shm_thread is running\n");
+
+ while (!kthread_should_stop()) {
+ ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
+ pdata, sizeof(*pdata),
+ &ctrl->svc_fifo_lock);
+
+ if (!ret_fifo)
+ continue;
+
+ pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n",
+ (unsigned int)pdata->paddr, pdata->command,
+ (unsigned int)pdata->size);
+
+ switch (pdata->command) {
+ case COMMAND_RECONFIG_DATA_CLAIM:
+ svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
+ continue;
+ case COMMAND_RECONFIG:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
+ pr_debug("conf_type=%u\n", (unsigned int)pdata->flag);
+ a1 = pdata->flag;
+ a2 = 0;
+ break;
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_STATUS:
+ a0 = INTEL_SIP_SMC_RSU_STATUS;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_UPDATE:
+ a0 = INTEL_SIP_SMC_RSU_UPDATE;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_NOTIFY:
+ a0 = INTEL_SIP_SMC_RSU_NOTIFY;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_MAX_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_MAX_RETRY;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_FIRMWARE_VERSION:
+ a0 = INTEL_SIP_SMC_FIRMWARE_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
+
+ /* for FCS */
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 1;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_DATA_DECRYPTION:
+ a0 = INTEL_SIP_SMC_FCS_CRYPTION;
+ a1 = 0;
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size;
+ a4 = (unsigned long)pdata->paddr_output;
+ a5 = (unsigned long)pdata->size_output;
+ break;
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+ case COMMAND_FCS_REQUEST_SERVICE:
+ a0 = INTEL_SIP_SMC_FCS_SERVICE_REQUEST;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ a0 = INTEL_SIP_SMC_FCS_SEND_CERTIFICATE;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = 0;
+ break;
+
+ /* for polling */
+ case COMMAND_POLL_SERVICE_STATUS:
+ a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_RSU_DCMF_STATUS:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_SMC_SVC_VERSION:
+ a0 = INTEL_SIP_SMC_SVC_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
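+ /* payload and response buffer sizes are passed to SDM in 32-bit words */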
+ case COMMAND_MBOX_SEND_CMD:
+ a0 = INTEL_SIP_SMC_MBOX_SEND_CMD;
+ a1 = pdata->arg[0];
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size / BYTE_TO_WORD_SIZE;
+ a4 = pdata->arg[1];
+ a5 = (unsigned long)pdata->paddr_output;
+ a6 = (unsigned long)pdata->size_output / BYTE_TO_WORD_SIZE;
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+ pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
+ __func__,
+ (unsigned int)a0,
+ (unsigned int)a1);
+ pr_debug(" a2=0x%016x\n", (unsigned int)a2);
+ pr_debug(" a3=0x%016x\n", (unsigned int)a3);
+ pr_debug(" a4=0x%016x\n", (unsigned int)a4);
+ pr_debug(" a5=0x%016x\n", (unsigned int)a5);
+ ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res);
+
+ pr_debug("%s: after SMC call -- res.a0=0x%016x",
+ __func__, (unsigned int)res.a0);
+ pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
+ (unsigned int)res.a1, (unsigned int)res.a2);
+ pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
+
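+ /*
+ * COMMAND_RSU_STATUS returns its data directly in the SMC result
+ * registers, so hand the whole arm_smccc_res back to the client
+ * callback instead of decoding it here.
+ */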
+ if (pdata->command == COMMAND_RSU_STATUS) {
+ if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
+ cbdata->status = BIT(SVC_STATUS_ERROR);
+ else
+ cbdata->status = BIT(SVC_STATUS_OK);
+
+ cbdata->kaddr1 = &res;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+ continue;
+ }
+
+ switch (res.a0) {
+ case INTEL_SIP_SMC_STATUS_OK:
+ svc_thread_recv_status_ok(pdata, cbdata, res);
+ break;
+ case INTEL_SIP_SMC_STATUS_BUSY:
+ switch (pdata->command) {
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ svc_thread_cmd_data_claim(ctrl,
+ pdata, cbdata);
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ case COMMAND_POLL_SERVICE_STATUS:
+ svc_thread_cmd_config_status(ctrl,
+ pdata, cbdata);
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+ break;
+ case INTEL_SIP_SMC_STATUS_REJECTED:
+ pr_debug("%s: STATUS_REJECTED\n", __func__);
+ /* for FCS */
+ switch (pdata->command) {
+ case COMMAND_FCS_REQUEST_SERVICE:
+ case COMMAND_FCS_SEND_CERTIFICATE:
+ case COMMAND_FCS_GET_PROVISION_DATA:
+ case COMMAND_FCS_DATA_ENCRYPTION:
+ case COMMAND_FCS_DATA_DECRYPTION:
+ case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ case COMMAND_MBOX_SEND_CMD:
+ cbdata->status = BIT(SVC_STATUS_INVALID_PARAM);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl,
+ cbdata);
+ break;
+ }
+ break;
+ case INTEL_SIP_SMC_STATUS_ERROR:
+ case INTEL_SIP_SMC_RSU_ERROR:
+ pr_err("%s: STATUS_ERROR\n", __func__);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
+ cbdata->kaddr1 = &res.a1;
+ cbdata->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cbdata->kaddr3 = (res.a3) ? &res.a3 : NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+ break;
+ default:
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+ * be compatible with older version firmware which
+ * doesn't support newer RSU commands
+ */
+ if ((pdata->command != COMMAND_RSU_UPDATE) &&
+ (pdata->command != COMMAND_RSU_STATUS)) {
+ cbdata->status =
+ BIT(SVC_STATUS_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
+ break;
+
+ }
+ }
+
+ kfree(cbdata);
+ kfree(pdata);
+
+ return 0;
+}
+
+/**
+ * svc_normal_to_secure_shm_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates the svc_smc_hvc_shm_thread kthread on CPU
+ * node 0; that thread runs this function to query the physical address of the
+ * memory block reserved by the secure monitor software in the secure world.
+ *
+ * svc_normal_to_secure_shm_thread() runs to completion and exits on its own:
+ * it is a standalone thread, nobody calls kthread_stop() on it, so it does
+ * not need to check kthread_should_stop().
+ */
+static int svc_normal_to_secure_shm_thread(void *data)
+{
+ struct stratix10_svc_sh_memory
+ *sh_mem = (struct stratix10_svc_sh_memory *)data;
+ struct arm_smccc_res res;
+
+ /* SMC or HVC call to get shared memory info from secure world */
+ sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ sh_mem->addr = res.a1;
+ sh_mem->size = res.a2;
+ } else {
+ pr_err("%s: after SMC call -- res.a0=0x%016x", __func__,
+ (unsigned int)res.a0);
+ sh_mem->addr = 0;
+ sh_mem->size = 0;
+ }
+
+ complete(&sh_mem->sync_complete);
+ return 0;
+}
+
+/**
+ * svc_get_sh_memory() - get memory block reserved by secure monitor SW
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: zero for successfully getting the physical address of memory block
+ * reserved by secure monitor software, or negative value on error.
+ */
+static int svc_get_sh_memory(struct platform_device *pdev,
+ struct stratix10_svc_sh_memory *sh_memory)
+{
+ struct device *dev = &pdev->dev;
+ struct task_struct *sh_memory_task;
+ unsigned int cpu = 0;
+
+ init_completion(&sh_memory->sync_complete);
+
+ /* smc or hvc call happens on cpu 0 bound kthread */
+ sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread,
+ (void *)sh_memory,
+ cpu_to_node(cpu),
+ "svc_smc_hvc_shm_thread");
+ if (IS_ERR(sh_memory_task)) {
+ dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n");
+ return -EINVAL;
+ }
+
+ wake_up_process(sh_memory_task);
+
+ if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
+ dev_err(dev,
+ "timed out getting shared memory parameters from the secure world\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!sh_memory->addr || !sh_memory->size) {
+ dev_err(dev,
+ "failed to get shared memory info from secure world\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n",
+ (unsigned int)sh_memory->addr,
+ (unsigned int)sh_memory->size);
+
+ return 0;
+}
+
+/**
+ * svc_create_memory_pool() - create a memory pool from reserved memory block
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: pool allocated from reserved memory block or ERR_PTR() on error.
+ */
+static struct gen_pool *
+svc_create_memory_pool(struct platform_device *pdev,
+ struct stratix10_svc_sh_memory *sh_memory)
+{
+ struct device *dev = &pdev->dev;
+ struct gen_pool *genpool;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ size_t page_mask = PAGE_SIZE - 1;
+ int min_alloc_order = 3;
+ int ret;
+
+ begin = roundup(sh_memory->addr, PAGE_SIZE);
+ end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+ va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+ if (IS_ERR(va)) {
+ dev_err(dev, "fail to remap shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+ dev_dbg(dev,
+ "reserved memory vaddr: %p, paddr: 0x%16x size: 0x%8x\n",
+ va, (unsigned int)paddr, (unsigned int)size);
+ if ((vaddr & page_mask) || (paddr & page_mask) ||
+ (size & page_mask)) {
+ dev_err(dev, "page is not aligned\n");
+ return ERR_PTR(-EINVAL);
+ }
+ genpool = gen_pool_create(min_alloc_order, -1);
+ if (!genpool) {
+ dev_err(dev, "fail to create genpool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
+ ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
+ if (ret) {
+ dev_err(dev, "fail to add memory chunk to the pool\n");
+ gen_pool_destroy(genpool);
+ return ERR_PTR(ret);
+ }
+
+ return genpool;
+}
+
+/**
+ * svc_smccc_smc() - secure monitor call between normal and secure world
+ * @a0: argument passed in registers 0
+ * @a1: argument passed in registers 1
+ * @a2: argument passed in registers 2
+ * @a3: argument passed in registers 3
+ * @a4: argument passed in registers 4
+ * @a5: argument passed in registers 5
+ * @a6: argument passed in registers 6
+ * @a7: argument passed in registers 7
+ * @res: result values from register 0 to 3
+ */
+static void svc_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * svc_smccc_hvc() - hypervisor call between normal and secure world
+ * @a0: argument passed in registers 0
+ * @a1: argument passed in registers 1
+ * @a2: argument passed in registers 2
+ * @a3: argument passed in registers 3
+ * @a4: argument passed in registers 4
+ * @a5: argument passed in registers 5
+ * @a6: argument passed in registers 6
+ * @a7: argument passed in registers 7
+ * @res: result values from register 0 to 3
+ */
+static void svc_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * get_invoke_func() - invoke SMC or HVC call
+ * @dev: pointer to device
+ *
+ * Return: function pointer to svc_smccc_smc or svc_smccc_hvc.
+ */
+static svc_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ if (of_property_read_string(dev->of_node, "method", &method)) {
+ dev_warn(dev, "missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp(method, "smc"))
+ return svc_smccc_smc;
+ if (!strcmp(method, "hvc"))
+ return svc_smccc_hvc;
+
+ dev_warn(dev, "invalid \"method\" property: %s\n", method);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * stratix10_svc_request_channel_byname() - request a service channel
+ * @client: pointer to service client
+ * @name: service client name
+ *
+ * This function is used by service client to request a service channel.
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
+ struct stratix10_svc_client *client, const char *name)
+{
+ struct device *dev = client->dev;
+ struct stratix10_svc_controller *controller;
+ struct stratix10_svc_chan *chan = NULL;
+ unsigned long flag;
+ int i;
+
+ /* if probe was called after client's, or error on probe */
+ if (list_empty(&svc_ctrl))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ controller = list_first_entry(&svc_ctrl,
+ struct stratix10_svc_controller, node);
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ if (!strcmp(controller->chans[i].name, name)) {
+ chan = &controller->chans[i];
+ break;
+ }
+ }
+
+ /* if there was no channel match */
+ if (i == SVC_NUM_CHANNEL) {
+ dev_err(dev, "%s: channel not allocated\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (chan->scl || !try_module_get(controller->dev->driver->owner)) {
+ dev_dbg(dev, "%s: svc not free\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flag);
+ chan->scl = client;
+ chan->ctrl->num_active_client++;
+ spin_unlock_irqrestore(&chan->lock, flag);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
+
+/**
+ * stratix10_svc_free_channel() - free service channel
+ * @chan: service channel to be freed
+ *
+ * This function is used by service client to free a service channel.
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&chan->lock, flag);
+ chan->scl = NULL;
+ chan->ctrl->num_active_client--;
+ module_put(chan->ctrl->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flag);
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_channel);
+
+/**
+ * stratix10_svc_send() - send a message data to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * "struct stratix10_svc_client_msg"
+ *
+ * This function is used by a service client to add a message to the service
+ * layer driver's queue, to be sent to the secure world.
+ *
+ * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
+{
+ struct stratix10_svc_client_msg
+ *p_msg = (struct stratix10_svc_client_msg *)msg;
+ struct stratix10_svc_data_mem *p_mem;
+ struct stratix10_svc_data *p_data;
+ int ret = 0;
+ unsigned int cpu = 0;
+
+ p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
+ if (!p_data)
+ return -ENOMEM;
+
+ /* first client will create kernel thread */
+ if (!chan->ctrl->task) {
+ chan->ctrl->task =
+ kthread_create_on_node(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu_to_node(cpu),
+ "svc_smc_hvc_thread");
+ if (IS_ERR(chan->ctrl->task)) {
+ dev_err(chan->ctrl->dev,
+ "failed to create svc_smc_hvc_thread\n");
+ kfree(p_data);
+ return -EINVAL;
+ }
+ kthread_bind(chan->ctrl->task, cpu);
+ wake_up_process(chan->ctrl->task);
+ }
+
+ pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
+ p_msg->payload, p_msg->command,
+ (unsigned int)p_msg->payload_length);
+
+ if (list_empty(&svc_data_mem)) {
+ if (p_msg->command == COMMAND_RECONFIG) {
+ struct stratix10_svc_command_config_type *ct =
+ (struct stratix10_svc_command_config_type *)
+ p_msg->payload;
+ p_data->flag = ct->flags;
+ }
+ } else {
+ list_for_each_entry(p_mem, &svc_data_mem, node)
+ if (p_mem->vaddr == p_msg->payload) {
+ p_data->paddr = p_mem->paddr;
+ p_data->size = p_msg->payload_length;
+ break;
+ }
+ if (p_msg->payload_output) {
+ list_for_each_entry(p_mem, &svc_data_mem, node)
+ if (p_mem->vaddr == p_msg->payload_output) {
+ p_data->paddr_output =
+ p_mem->paddr;
+ p_data->size_output =
+ p_msg->payload_length_output;
+ break;
+ }
+ }
+ }
+
+ p_data->command = p_msg->command;
+ p_data->arg[0] = p_msg->arg[0];
+ p_data->arg[1] = p_msg->arg[1];
+ p_data->arg[2] = p_msg->arg[2];
+ p_data->size = p_msg->payload_length;
+ p_data->chan = chan;
+ pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
+ (unsigned int)p_data->paddr, p_data->command,
+ (unsigned int)p_data->size);
+ ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
+ sizeof(*p_data),
+ &chan->ctrl->svc_fifo_lock);
+
+ kfree(p_data);
+
+ if (!ret)
+ return -ENOBUFS;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_send);
+
+/**
+ * stratix10_svc_done() - complete service request transactions
+ * @chan: service channel assigned to the client
+ *
+ * This function should be called when the client has finished its request
+ * or when an error occurs during the request. It allows the service layer
+ * to stop the running thread and so save kernel resources.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan)
+{
+ /* stop thread when thread is running AND only one active client */
+ if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
+ pr_debug("svc_smc_hvc_shm_thread is stopped\n");
+ kthread_stop(chan->ctrl->task);
+ chan->ctrl->task = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_done);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: memory size requested by a specific service client
+ *
+ * The service layer allocates a buffer of the requested size from the memory
+ * pool; a service client uses this function to obtain the allocated buffer.
+ *
+ * Return: address of allocated memory on success, or ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+ size_t size)
+{
+ struct stratix10_svc_data_mem *pmem;
+ unsigned long va;
+ phys_addr_t pa;
+ struct gen_pool *genpool = chan->ctrl->genpool;
+ size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+ pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL);
+ if (!pmem)
+ return ERR_PTR(-ENOMEM);
+
+ va = gen_pool_alloc(genpool, s);
+ if (!va)
+ return ERR_PTR(-ENOMEM);
+
+ memset((void *)va, 0, s);
+ pa = gen_pool_virt_to_phys(genpool, va);
+
+ pmem->vaddr = (void *)va;
+ pmem->paddr = pa;
+ pmem->size = s;
+ list_add_tail(&pmem->node, &svc_data_mem);
+ pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
+ pmem->vaddr, (unsigned int)pmem->paddr);
+
+ return (void *)va;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: memory to be freed
+ *
+ * This function is used by service client to free allocated buffers.
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
+{
+ struct stratix10_svc_data_mem *pmem;
+
+ list_for_each_entry(pmem, &svc_data_mem, node)
+ if (pmem->vaddr == kaddr) {
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
+ }
+
+ list_del(&svc_data_mem);
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
+
+static const struct of_device_id stratix10_svc_drv_match[] = {
+ {.compatible = "intel,stratix10-svc"},
+ {.compatible = "intel,agilex-svc"},
+ {},
+};
+
+static int stratix10_svc_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_svc_controller *controller;
+ struct stratix10_svc_chan *chans;
+ struct gen_pool *genpool;
+ struct stratix10_svc_sh_memory *sh_memory;
+ struct stratix10_svc *svc;
+
+ svc_invoke_fn *invoke_fn;
+ size_t fifo_size;
+ int ret;
+
+ /* get SMC or HVC function */
+ invoke_fn = get_invoke_func(dev);
+ if (IS_ERR(invoke_fn))
+ return -EINVAL;
+
+ sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL);
+ if (!sh_memory)
+ return -ENOMEM;
+
+ sh_memory->invoke_fn = invoke_fn;
+ ret = svc_get_sh_memory(pdev, sh_memory);
+ if (ret)
+ return ret;
+
+ genpool = svc_create_memory_pool(pdev, sh_memory);
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);
+
+ /* allocate service controller and supporting channel */
+ controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
+ if (!controller) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
+
+ chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
+ sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
+ if (!chans) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
+
+ controller->dev = dev;
+ controller->num_chans = SVC_NUM_CHANNEL;
+ controller->num_active_client = 0;
+ controller->chans = chans;
+ controller->genpool = genpool;
+ controller->task = NULL;
+ controller->invoke_fn = invoke_fn;
+ init_completion(&controller->complete_status);
+
+ fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
+ ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "failed to allocate FIFO\n");
+ goto err_destroy_pool;
+ }
+ spin_lock_init(&controller->svc_fifo_lock);
+
+ chans[0].scl = NULL;
+ chans[0].ctrl = controller;
+ chans[0].name = SVC_CLIENT_FPGA;
+ spin_lock_init(&chans[0].lock);
+
+ chans[1].scl = NULL;
+ chans[1].ctrl = controller;
+ chans[1].name = SVC_CLIENT_RSU;
+ spin_lock_init(&chans[1].lock);
+
+ chans[2].scl = NULL;
+ chans[2].ctrl = controller;
+ chans[2].name = SVC_CLIENT_FCS;
+ spin_lock_init(&chans[2].lock);
+
+ list_add_tail(&controller->node, &svc_ctrl);
+ platform_set_drvdata(pdev, controller);
+
+ /* add svc client device(s) */
+ svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+ if (!svc) {
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+
+ svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+ if (!svc->stratix10_svc_rsu) {
+ dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+
+ ret = platform_device_add(svc->stratix10_svc_rsu);
+ if (ret) {
+ platform_device_put(svc->stratix10_svc_rsu);
+ goto err_free_kfifo;
+ }
+
+ svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
+ if (!svc->intel_svc_fcs) {
+ dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+ ret = -ENOMEM;
+ goto err_unregister_dev;
+ }
+
+ ret = platform_device_add(svc->intel_svc_fcs);
+ if (ret) {
+ platform_device_put(svc->intel_svc_fcs);
+ goto err_unregister_dev;
+ }
+
+ dev_set_drvdata(dev, svc);
+
+ pr_info("Intel Service Layer Driver Initialized\n");
+
+ return 0;
+
+err_unregister_dev:
+ platform_device_unregister(svc->stratix10_svc_rsu);
+err_free_kfifo:
+ kfifo_free(&controller->svc_fifo);
+err_destroy_pool:
+ gen_pool_destroy(genpool);
+ return ret;
+}
+
+static int stratix10_svc_drv_remove(struct platform_device *pdev)
+{
+ struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
+ struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+
+ platform_device_unregister(svc->intel_svc_fcs);
+ platform_device_unregister(svc->stratix10_svc_rsu);
+
+ kfifo_free(&ctrl->svc_fifo);
+ if (ctrl->task) {
+ kthread_stop(ctrl->task);
+ ctrl->task = NULL;
+ }
+ if (ctrl->genpool)
+ gen_pool_destroy(ctrl->genpool);
+ list_del(&ctrl->node);
+
+ return 0;
+}
+
+static struct platform_driver stratix10_svc_driver = {
+ .probe = stratix10_svc_drv_probe,
+ .remove = stratix10_svc_drv_remove,
+ .driver = {
+ .name = "stratix10-svc",
+ .of_match_table = stratix10_svc_drv_match,
+ },
+};
+
+static int __init stratix10_svc_init(void)
+{
+ struct device_node *fw_np;
+ struct device_node *np;
+ int ret;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, stratix10_svc_drv_match);
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
+ ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&stratix10_svc_driver);
+}
+
+static void __exit stratix10_svc_exit(void)
+{
+ return platform_driver_unregister(&stratix10_svc_driver);
+}
+
+subsys_initcall(stratix10_svc_init);
+module_exit(stratix10_svc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
+MODULE_ALIAS("platform:stratix10-svc");
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
new file mode 100644
index 0000000000..3c197db42c
--- /dev/null
+++ b/drivers/firmware/sysfb.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic System Framebuffers
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ */
+
+/*
+ * Simple-Framebuffer support
+ * Create a platform-device for any available boot framebuffer. The
+ * simple-framebuffer platform device is already available on DT systems, so
+ * this module parses the global "screen_info" object and creates a suitable
+ * platform device compatible with the "simple-framebuffer" DT object. If
+ * the framebuffer is incompatible, we instead create a legacy
+ * "vesa-framebuffer", "efi-framebuffer" or "platform-framebuffer" device and
+ * pass the screen_info as platform_data. This allows legacy drivers
+ * to pick these devices up without messing with simple-framebuffer drivers.
+ * The global "screen_info" is still valid at all times.
+ *
+ * If CONFIG_SYSFB_SIMPLEFB is not selected, this module never registers
+ * "simple-framebuffer" platform devices and only uses the legacy
+ * framebuffer devices for backwards compatibility.
+ *
+ * TODO: We set the dev_id field of all platform-devices to 0. This allows
+ * other OF/DT parsers to create such devices, too. However, they must
+ * start at offset 1 for this to work.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+#include <linux/sysfb.h>
+
+static struct platform_device *pd;
+static DEFINE_MUTEX(disable_lock);
+static bool disabled;
+
+static bool sysfb_unregister(void)
+{
+ if (IS_ERR_OR_NULL(pd))
+ return false;
+
+ platform_device_unregister(pd);
+ pd = NULL;
+
+ return true;
+}
+
+/**
+ * sysfb_disable() - disable the Generic System Framebuffers support
+ *
+ * This disables the registration of system framebuffer devices that match the
+ * generic drivers that make use of the system framebuffer set up by firmware.
+ *
+ * It also unregisters a device if this was already registered by sysfb_init().
+ *
+ * Context: The function can sleep. The @disable_lock mutex is acquired to
+ * serialize against sysfb_init(), which registers a system framebuffer device.
+ */
+void sysfb_disable(void)
+{
+ mutex_lock(&disable_lock);
+ sysfb_unregister();
+ disabled = true;
+ mutex_unlock(&disable_lock);
+}
+EXPORT_SYMBOL_GPL(sysfb_disable);
+
+static __init int sysfb_init(void)
+{
+ struct screen_info *si = &screen_info;
+ struct simplefb_platform_data mode;
+ const char *name;
+ bool compatible;
+ int ret = 0;
+
+ mutex_lock(&disable_lock);
+ if (disabled)
+ goto unlock_mutex;
+
+ sysfb_apply_efi_quirks();
+
+ /* try to create a simple-framebuffer device */
+ compatible = sysfb_parse_mode(si, &mode);
+ if (compatible) {
+ pd = sysfb_create_simplefb(si, &mode);
+ if (!IS_ERR(pd))
+ goto unlock_mutex;
+ }
+
+ /* if the FB is incompatible, create a legacy framebuffer device */
+ if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
+ name = "efi-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ name = "vesa-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
+ name = "vga-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
+ name = "ega-framebuffer";
+ else
+ name = "platform-framebuffer";
+
+ pd = platform_device_alloc(name, 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ sysfb_set_efifb_fwnode(pd);
+
+ ret = platform_device_add_data(pd, si, sizeof(*si));
+ if (ret)
+ goto err;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto err;
+
+ goto unlock_mutex;
+err:
+ platform_device_put(pd);
+unlock_mutex:
+ mutex_unlock(&disable_lock);
+ return ret;
+}
+
+/* must execute after PCI subsystem for EFI quirks */
+device_initcall(sysfb_init);
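
The devices registered by sysfb_init() are picked up by framebuffer or DRM drivers that match on the platform-device name and read the screen_info attached as platform data. A minimal consumer sketch, assuming a hypothetical driver bound to the legacy "efi-framebuffer" name (this mirrors how drivers such as efifb bind; the driver name "examplefb" and its probe behaviour are illustrative only and not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>

static int examplefb_probe(struct platform_device *pdev)
{
	/* sysfb attached the boot-time screen_info as platform data */
	const struct screen_info *si = dev_get_platdata(&pdev->dev);

	if (!si)
		return -ENODEV;

	dev_info(&pdev->dev, "boot framebuffer %ux%u at 0x%x\n",
		 si->lfb_width, si->lfb_height, si->lfb_base);
	return 0;
}

static struct platform_driver examplefb_driver = {
	.probe = examplefb_probe,
	.driver = {
		.name = "efi-framebuffer",
	},
};
module_platform_driver(examplefb_driver);

MODULE_LICENSE("GPL");
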
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
new file mode 100644
index 0000000000..74363ed750
--- /dev/null
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic System Framebuffers
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ */
+
+/*
+ * simple-framebuffer probing
+ * Try to convert "screen_info" into a "simple-framebuffer" compatible mode.
+ * If the mode is incompatible, we return "false" and let the caller create
+ * legacy nodes instead.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+#include <linux/sysfb.h>
+
+static const char simplefb_resname[] = "BOOTFB";
+static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+
+/* try parsing screen_info into a simple-framebuffer mode struct */
+__init bool sysfb_parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ __u8 type;
+ u32 bits_per_pixel;
+ unsigned int i;
+
+ type = si->orig_video_isVGA;
+ if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
+ return false;
+
+ /*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits, including alpha but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It's not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel from the color bits, reserved bits and
+ * reported lfb_depth, whichever is highest. In the loop below,
+ * ignore simplefb formats with alpha bits, as EFI and VESA
+ * don't specify alpha channels.
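+ *
+ * For example, an EFI GOP mode reported as XRGB8888 with lfb_depth 24
+ * (red 8@16, green 8@8, blue 8@0, reserved 8@24) resolves to
+ * bits_per_pixel 32 here.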
+ */
+ if (si->lfb_depth > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
+ } else {
+ bits_per_pixel = si->lfb_depth;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ const struct simplefb_format *f = &formats[i];
+
+ if (f->transp.length)
+ continue; /* transparent formats are unsupported by VESA/EFI */
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ mode->format = f->name;
+ mode->width = si->lfb_width;
+ mode->height = si->lfb_height;
+ mode->stride = si->lfb_linelength;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ struct platform_device *pd;
+ struct resource res;
+ u64 base, size;
+ u32 length;
+ int ret;
+
+ /*
+ * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+ * upper half of the base address. Assemble the address, then make sure
+ * it is valid and we can actually access it.
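+ * For example, ext_lfb_base 0x1 combined with lfb_base 0x80000000
+ * yields the 64-bit address 0x180000000.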
+ */
+ base = si->lfb_base;
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)si->ext_lfb_base << 32;
+ if (!base || (u64)(resource_size_t)base != base) {
+ printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Don't use lfb_size as IORESOURCE size, since it may contain the
+ * entire VMEM, and thus require huge mappings. Use just the part we
+ * need, that is, the part where the framebuffer is located. But verify
+ * that it does not exceed the advertised VMEM.
+ * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+ * historical reasons.
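+ * A VBE lfb_size of 0x100 therefore describes 0x100 << 16 bytes (16 MiB).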
+ */
+ size = si->lfb_size;
+ if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ size <<= 16;
+ length = mode->height * mode->stride;
+ if (length > size) {
+ printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
+ return ERR_PTR(-EINVAL);
+ }
+ length = PAGE_ALIGN(length);
+
+ /* setup IORESOURCE_MEM as framebuffer memory */
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.name = simplefb_resname;
+ res.start = base;
+ res.end = res.start + length - 1;
+ if (res.end <= res.start)
+ return ERR_PTR(-EINVAL);
+
+ pd = platform_device_alloc("simple-framebuffer", 0);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ sysfb_set_efifb_fwnode(pd);
+
+ ret = platform_device_add_resources(pd, &res, 1);
+ if (ret)
+ goto err_put_device;
+
+ ret = platform_device_add_data(pd, mode, sizeof(*mode));
+ if (ret)
+ goto err_put_device;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto err_put_device;
+
+ return pd;
+
+err_put_device:
+ platform_device_put(pd);
+
+ return ERR_PTR(ret);
+}
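
For a typical EFI GOP mode reported as XRGB8888, sysfb_parse_mode() matches the "x8r8g8b8" entry of SIMPLEFB_FORMATS, and sysfb_create_simplefb() registers a "simple-framebuffer" device whose platform data looks roughly like the sketch below (the resolution values are illustrative only; the real ones come from firmware via screen_info):

#include <linux/platform_data/simplefb.h>

static const struct simplefb_platform_data example_mode = {
	.width  = 1920,
	.height = 1080,
	.stride = 1920 * 4,		/* lfb_linelength in bytes */
	.format = "x8r8g8b8",		/* XRGB8888 entry in SIMPLEFB_FORMATS */
};
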
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 0000000000..cde1ab8bd9
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+ data and the different communication channels in SysRAM or RAM and
+ keeps the content in sync between the host CPU and remote
+ processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ depends on !CPU_BIG_ENDIAN
+ help
+ BPMP (Boot and Power Management Processor) is designed to off-load
+ PM functions such as clock/DVFS/thermal/power management from the CPU.
+ It relies on HSP as the hardware synchronization and notification
+ module and on IVC as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 0000000000..620cf3fdd6
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_234_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
+obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
new file mode 100644
index 0000000000..6dfe3d3410
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static DEFINE_MUTEX(bpmp_debug_lock);
+
+struct seqbuf {
+ char *buf;
+ size_t pos;
+ size_t size;
+};
+
+static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
+{
+ seqbuf->buf = buf;
+ seqbuf->size = size;
+ seqbuf->pos = 0;
+}
+
+static size_t seqbuf_avail(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
+}
+
+static int seqbuf_status(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
+}
+
+static int seqbuf_eof(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos >= seqbuf->size;
+}
+
+static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
+{
+ nbyte = min(nbyte, seqbuf_avail(seqbuf));
+ memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
+ seqbuf->pos += nbyte;
+ return seqbuf_status(seqbuf);
+}
+
+static int seqbuf_read_u32(struct seqbuf *seqbuf, u32 *v)
+{
+ return seqbuf_read(seqbuf, v, 4);
+}
+
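+/* point *str at the NUL-terminated string at the current position and advance past its terminator */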
+static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
+{
+ *str = seqbuf->buf + seqbuf->pos;
+ seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
+ seqbuf->pos++;
+ return seqbuf_status(seqbuf);
+}
+
+static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
+{
+ seqbuf->pos += offset;
+}
+
+/* map filename in Linux debugfs to corresponding entry in BPMP */
+static const char *get_filename(struct tegra_bpmp *bpmp,
+ const struct file *file, char *buf, int size)
+{
+ const char *root_path, *filename = NULL;
+ char *root_path_buf;
+ size_t root_len;
+ size_t root_path_buf_len = 512;
+
+ root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
+ if (!root_path_buf)
+ goto out;
+
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ root_path_buf_len);
+ if (IS_ERR(root_path))
+ goto out;
+
+ root_len = strlen(root_path);
+
+ filename = dentry_path(file->f_path.dentry, buf, size);
+ if (IS_ERR(filename)) {
+ filename = NULL;
+ goto out;
+ }
+
+ if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
+ filename = NULL;
+ goto out;
+ }
+
+ filename += root_len;
+
+out:
+ kfree(root_path_buf);
+ return filename;
+}
+
+static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
+ u32 *fd, u32 *len, bool write)
+{
+ struct mrq_debug_request req = {
+ .cmd = write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO,
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ ssize_t sz_name;
+ int err = 0;
+
+ sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
+ if (sz_name < 0) {
+ pr_err("File name too large: %s\n", name);
+ return -EINVAL;
+ }
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *len = resp.fop.datalen;
+ *fd = resp.fop.fd;
+
+ return 0;
+}
+
+static int mrq_debug_close(struct tegra_bpmp *bpmp, u32 fd)
+{
+ struct mrq_debug_request req = {
+ .cmd = CMD_DEBUG_CLOSE,
+ .frd = {
+ .fd = fd,
+ },
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err = 0;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
+ char *data, size_t sz_data, u32 *nbytes)
+{
+ struct mrq_debug_request req = {
+ .cmd = CMD_DEBUG_READ,
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ u32 fd = 0, len = 0;
+ int remaining, err, close_err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ if (len > sz_data) {
+ err = -EFBIG;
+ goto close;
+ }
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ memcpy(data, resp.frd.data, resp.frd.readlen);
+ data += resp.frd.readlen;
+ remaining -= resp.frd.readlen;
+ }
+
+ *nbytes = len;
+
+close:
+ close_err = mrq_debug_close(bpmp, fd);
+ if (!err)
+ err = close_err;
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
+ uint8_t *data, size_t sz_data)
+{
+ struct mrq_debug_request req = {
+ .cmd = CMD_DEBUG_WRITE
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ u32 fd = 0, len = 0;
+ size_t remaining;
+ int err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 1);
+ if (err)
+ goto out;
+
+ if (sz_data > len) {
+ err = -EINVAL;
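+/*
+ * Each channel occupies a two-bit field in the arbitration semaphore;
+ * the values below name the per-channel states that the helpers further
+ * down test and set (slave signalled/queued, master free/acked).
+ */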
+ goto close;
+ }
+
+ req.fwr.fd = fd;
+ remaining = sz_data;
+
+ while (remaining > 0) {
+ len = min(remaining, sizeof(req.fwr.data));
+ memcpy(req.fwr.data, data, len);
+ req.fwr.datalen = len;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ data += req.fwr.datalen;
+ remaining -= req.fwr.datalen;
+ }
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int bpmp_debug_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char fnamebuf[256];
+ const char *filename;
+ struct mrq_debug_request req = {
+ .cmd = CMD_DEBUG_READ,
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ u32 fd = 0, len = 0;
+ int remaining, err, close_err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, filename, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ seq_write(m, resp.frd.data, resp.frd.readlen);
+ remaining -= resp.frd.readlen;
+ }
+
+close:
+ close_err = mrq_debug_close(bpmp, fd);
+ if (!err)
+ err = close_err;
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ ssize_t err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ databuf = memdup_user(buf, count);
+ if (IS_ERR(databuf))
+ return PTR_ERR(databuf);
+
+ err = mrq_debug_write(bpmp, filename, databuf, count);
+ kfree(databuf);
+
+ return err ?: count;
+}
+
+static int bpmp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, bpmp_debug_show, file, SZ_256K);
+}
+
+static const struct file_operations bpmp_debug_fops = {
+ .open = bpmp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = bpmp_debug_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
+ struct dentry *parent,
+ char *ppath)
+{
+ const size_t pathlen = SZ_256;
+ const size_t bufsize = SZ_16K;
+ struct dentry *dentry;
+ u32 dsize, attrs = 0;
+ struct seqbuf seqbuf;
+ char *buf, *pathbuf;
+ const char *name;
+ int err = 0;
+
+ if (!bpmp || !parent || !ppath)
+ return -EINVAL;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pathbuf = kzalloc(pathlen, GFP_KERNEL);
+ if (!pathbuf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
+ if (err)
+ goto out;
+
+ seqbuf_init(&seqbuf, buf, dsize);
+
+ while (!seqbuf_eof(&seqbuf)) {
+ err = seqbuf_read_u32(&seqbuf, &attrs);
+ if (err)
+ goto out;
+
+ err = seqbuf_read_str(&seqbuf, &name);
+ if (err < 0)
+ goto out;
+
+ if (attrs & DEBUGFS_S_ISDIR) {
+ size_t len;
+
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
+ if (len >= pathlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = bpmp_populate_debugfs_inband(bpmp, dentry,
+ pathbuf);
+ if (err < 0)
+ goto out;
+ } else {
+ umode_t mode;
+
+ mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
+ mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
+ dentry = debugfs_create_file(name, mode, parent, bpmp,
+ &bpmp_debug_fops);
+ if (IS_ERR(dentry)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(pathbuf);
+ kfree(buf);
+
+ return err;
+}
+
+static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data,
+ size_t *nbytes)
+{
+ struct mrq_debugfs_request req = {
+ .cmd = CMD_DEBUGFS_READ,
+ .fop = {
+ .fnameaddr = (u32)name,
+ .fnamelen = (u32)sz_name,
+ .dataaddr = (u32)data,
+ .datalen = (u32)sz_data,
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *nbytes = (size_t)resp.fop.nbytes;
+
+ return 0;
+}
+
+static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = CMD_DEBUGFS_WRITE,
+ .fop = {
+ .fnameaddr = (u32)name,
+ .fnamelen = (u32)sz_name,
+ .dataaddr = (u32)data,
+ .datalen = (u32)sz_data,
+ },
+ };
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ };
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
+ size_t size, size_t *nbytes)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = CMD_DEBUGFS_DUMPDIR,
+ .dumpdir = {
+ .dataaddr = (u32)addr,
+ .datalen = (u32)size,
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *nbytes = (size_t)resp.dumpdir.nbytes;
+
+ return 0;
+}
+
+static int debugfs_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = m->size;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char buf[256];
+ const char *filename;
+ size_t len, nbytes;
+ int err;
+
+ filename = get_filename(bpmp, file, buf, sizeof(buf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ err = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ &nbytes);
+
+ if (!err)
+ seq_write(m, datavirt, nbytes);
+
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return err;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, debugfs_show, file, SZ_128K);
+}
+
+static ssize_t debugfs_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = count;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char fnamebuf[256];
+ const char *filename;
+ size_t len;
+ int err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ err = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ if (copy_from_user(datavirt, buf, count)) {
+ err = -EFAULT;
+ goto free_databuf;
+ }
+
+ err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ count);
+
+free_databuf:
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return err ?: count;
+}
+
+static const struct file_operations debugfs_fops = {
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = debugfs_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
+ struct dentry *parent, u32 depth)
+{
+ int err;
+ u32 d, t;
+ const char *name;
+ struct dentry *dentry;
+
+ while (!seqbuf_eof(seqbuf)) {
+ err = seqbuf_read_u32(seqbuf, &d);
+ if (err < 0)
+ return err;
+
+ if (d < depth) {
+ seqbuf_seek(seqbuf, -4);
+ /* go up a level */
+ return 0;
+ } else if (d != depth) {
+ /* malformed data received from BPMP */
+ return -EIO;
+ }
+
+ err = seqbuf_read_u32(seqbuf, &t);
+ if (err < 0)
+ return err;
+ err = seqbuf_read_str(seqbuf, &name);
+ if (err < 0)
+ return err;
+
+ if (t & DEBUGFS_S_ISDIR) {
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry))
+ return -ENOMEM;
+ err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
+ if (err < 0)
+ return err;
+ } else {
+ umode_t mode;
+
+ mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
+ mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
+ dentry = debugfs_create_file(name, mode,
+ parent, bpmp,
+ &debugfs_fops);
+ if (IS_ERR(dentry))
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
+{
+ struct seqbuf seqbuf;
+ const size_t sz = SZ_512K;
+ dma_addr_t phys;
+ size_t nbytes;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
+ if (err < 0) {
+ goto free;
+ } else if (nbytes > sz) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ seqbuf_init(&seqbuf, virt, nbytes);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+
+ return err;
+}
+
+int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
+{
+ struct dentry *root;
+ bool inband;
+ int err;
+
+ inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
+
+ if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ return 0;
+
+ root = debugfs_create_dir("bpmp", NULL);
+ if (IS_ERR(root))
+ return -ENOMEM;
+
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (IS_ERR(bpmp->debugfs_mirror)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (inband)
+ err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
+ "/");
+ else
+ err = bpmp_populate_debugfs_shmem(bpmp);
+
+out:
+ if (err < 0)
+ debugfs_remove_recursive(root);
+
+ return err;
+}
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 0000000000..182bfe3965
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+ int (*init)(struct tegra_bpmp *bpmp);
+ void (*deinit)(struct tegra_bpmp *bpmp);
+ bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+ int (*ack_response)(struct tegra_bpmp_channel *channel);
+ int (*ack_request)(struct tegra_bpmp_channel *channel);
+ bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+ int (*post_response)(struct tegra_bpmp_channel *channel);
+ int (*post_request)(struct tegra_bpmp_channel *channel);
+ int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+ int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 0000000000..6f0d0511b4
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+ struct tegra_bpmp *parent;
+
+ struct {
+ struct gen_pool *pool;
+ union {
+ void __iomem *sram;
+ void *dram;
+ };
+ dma_addr_t phys;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ struct tegra186_bpmp *priv;
+
+ priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+ return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+ int err;
+
+ err = tegra_ivc_read_get_next_frame(channel->ivc, &channel->ib);
+ if (err) {
+ iosys_map_clear(&channel->ib);
+ return false;
+ }
+
+ return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+ int err;
+
+ err = tegra_ivc_write_get_next_frame(channel->ivc, &channel->ob);
+ if (err) {
+ iosys_map_clear(&channel->ob);
+ return false;
+ }
+
+ return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ err = mbox_send_message(priv->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(priv->mbox.channel, 0);
+
+ return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ if (WARN_ON(priv->mbox.channel == NULL))
+ return;
+
+ tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ size_t message_size, queue_size;
+ struct iosys_map rx, tx;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ if (priv->rx.pool) {
+ iosys_map_set_vaddr_iomem(&rx, priv->rx.sram + offset);
+ iosys_map_set_vaddr_iomem(&tx, priv->tx.sram + offset);
+ } else {
+ iosys_map_set_vaddr(&rx, priv->rx.dram + offset);
+ iosys_map_set_vaddr(&tx, priv->tx.dram + offset);
+ }
+
+ err = tegra_ivc_init(channel->ivc, NULL, &rx, priv->rx.phys + offset, &tx,
+ priv->tx.phys + offset, 1, message_size, tegra186_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+ tegra_bpmp_handle_rx(bpmp);
+}
+
+static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ unsigned int i;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ if (priv->tx.pool) {
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.sram, 4096);
+ }
+}
+
+static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ struct device_node *np;
+ struct resource res;
+ size_t size;
+ int err;
+
+ np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err < 0) {
+ dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ return err;
+ }
+
+ size = resource_size(&res);
+
+ if (size < SZ_8K) {
+ dev_warn(bpmp->dev, "DRAM region must be larger than 8 KiB\n");
+ return -EINVAL;
+ }
+
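+ /*
+ * The TX IVC queues start at the beginning of the reserved region,
+ * the RX queues 4 KiB after it.
+ */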
+ priv->tx.phys = res.start;
+ priv->rx.phys = res.start + SZ_4K;
+
+ priv->tx.dram = devm_memremap(bpmp->dev, priv->tx.phys, size,
+ MEMREMAP_WC);
+ if (IS_ERR(priv->tx.dram)) {
+ err = PTR_ERR(priv->tx.dram);
+ dev_warn(bpmp->dev, "failed to map DRAM region: %d\n", err);
+ return err;
+ }
+
+ priv->rx.dram = priv->tx.dram + SZ_4K;
+
+ return 0;
+}
+
+static int tegra186_bpmp_sram_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+ if (!priv->tx.pool) {
+ dev_err(bpmp->dev, "TX shmem pool not found\n");
+ return -EPROBE_DEFER;
+ }
+
+ priv->tx.sram = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096,
+ &priv->tx.phys);
+ if (!priv->tx.sram) {
+ dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+ if (!priv->rx.pool) {
+ dev_err(bpmp->dev, "RX shmem pool not found\n");
+ err = -EPROBE_DEFER;
+ goto free_tx;
+ }
+
+ priv->rx.sram = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096,
+ &priv->rx.phys);
+ if (!priv->rx.sram) {
+ dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ return 0;
+
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
+
+ return err;
+}
+
+static int tegra186_bpmp_setup_channels(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+ int err;
+
+ err = tegra186_bpmp_dram_init(bpmp);
+ if (err == -ENODEV) {
+ err = tegra186_bpmp_sram_init(bpmp);
+ if (err < 0)
+ return err;
+ }
+
+ err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0) {
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+ return err;
+ }
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ break;
+ }
+
+ if (err < 0)
+ tegra186_bpmp_teardown_channels(bpmp);
+
+ return err;
+}
+
+static void tegra186_bpmp_reset_channels(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->parent = bpmp;
+ bpmp->priv = priv;
+
+ err = tegra186_bpmp_setup_channels(bpmp);
+ if (err < 0)
+ return err;
+
+ /* mbox registration */
+ priv->mbox.client.dev = bpmp->dev;
+ priv->mbox.client.rx_callback = mbox_handle_rx;
+ priv->mbox.client.tx_block = false;
+ priv->mbox.client.knows_txdone = false;
+
+ priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+ if (IS_ERR(priv->mbox.channel)) {
+ err = PTR_ERR(priv->mbox.channel);
+ dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+ tegra186_bpmp_teardown_channels(bpmp);
+ return err;
+ }
+
+ tegra186_bpmp_reset_channels(bpmp);
+
+ return 0;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ mbox_free_channel(priv->mbox.channel);
+
+ tegra186_bpmp_teardown_channels(bpmp);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+ tegra186_bpmp_reset_channels(bpmp);
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+ .init = tegra186_bpmp_init,
+ .deinit = tegra186_bpmp_deinit,
+ .is_response_ready = tegra186_bpmp_is_message_ready,
+ .is_request_ready = tegra186_bpmp_is_message_ready,
+ .ack_response = tegra186_bpmp_ack_message,
+ .ack_request = tegra186_bpmp_ack_message,
+ .is_response_channel_free = tegra186_bpmp_is_channel_free,
+ .is_request_channel_free = tegra186_bpmp_is_channel_free,
+ .post_response = tegra186_bpmp_post_message,
+ .post_request = tegra186_bpmp_post_message,
+ .ring_doorbell = tegra186_bpmp_ring_doorbell,
+ .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 0000000000..6295f5640c
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET 0x000
+#define RESULT_OFFSET(id) (0xc00 + (id) * 4)
+#define TRIGGER_ID_SHIFT 16
+#define TRIGGER_CMD_GET 4
+
+#define STA_OFFSET 0
+#define SET_OFFSET 4
+#define CLR_OFFSET 8
+
+#define CH_MASK(ch) (0x3 << ((ch) * 2))
+#define SL_SIGL(ch) (0x0 << ((ch) * 2))
+#define SL_QUED(ch) (0x1 << ((ch) * 2))
+#define MA_FREE(ch) (0x2 << ((ch) * 2))
+#define MA_ACKD(ch) (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+ void __iomem *atomics;
+ void __iomem *arb_sema;
+ struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+
+ return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+ priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ struct irq_data *irq_data = priv->tx_irq_data;
+
+ /*
+ * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+ * available messages
+ */
+ if (irq_data->chip->irq_retrigger)
+ return irq_data->chip->irq_retrigger(irq_data);
+
+ return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+
+ tegra_bpmp_handle_rx(bpmp);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ void __iomem *p;
+ u32 address;
+
+ /* Retrieve channel base address from BPMP */
+ writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+ priv->atomics + TRIGGER_OFFSET);
+ address = readl(priv->atomics + RESULT_OFFSET(index));
+
+ p = devm_ioremap(bpmp->dev, address, 0x80);
+ if (!p)
+ return -ENOMEM;
+
+ iosys_map_set_vaddr_iomem(&channel->ib, p);
+ iosys_map_set_vaddr_iomem(&channel->ob, p);
+
+ channel->index = index;
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct platform_device *pdev = to_platform_device(bpmp->dev);
+ struct tegra210_bpmp *priv;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+
+ priv->atomics = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->atomics))
+ return PTR_ERR(priv->atomics);
+
+ priv->arb_sema = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->arb_sema))
+ return PTR_ERR(priv->arb_sema);
+
+ err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "tx");
+ if (err < 0)
+ return err;
+
+ priv->tx_irq_data = irq_get_irq_data(err);
+ if (!priv->tx_irq_data) {
+ dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+ return -ENOENT;
+ }
+
+ err = platform_get_irq_byname(pdev, "rx");
+ if (err < 0)
+ return err;
+
+ err = devm_request_irq(&pdev->dev, err, rx_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+ .init = tegra210_bpmp_init,
+ .is_response_ready = tegra210_bpmp_is_response_ready,
+ .is_request_ready = tegra210_bpmp_is_request_ready,
+ .ack_response = tegra210_bpmp_ack_response,
+ .ack_request = tegra210_bpmp_ack_request,
+ .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+ .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+ .post_response = tegra210_bpmp_post_response,
+ .post_request = tegra210_bpmp_post_request,
+ .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 0000000000..c1590d3aa9
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/semaphore.h>
+#include <linux/sched/clock.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+#define MSG_ACK BIT(0) /* the sender expects a response */
+#define MSG_RING BIT(1) /* ring the doorbell once the response is posted */
+#define TAG_SZ 32
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+
+ return bpmp->soc->ops;
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int count;
+ int index;
+
+ count = bpmp->soc->channels.thread.count;
+
+ index = channel - channel->bpmp->threaded_channels;
+ if (index < 0 || index >= count)
+ return -EINVAL;
+
+ return index;
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_ready(channel);
+}
+
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_ready(channel);
+}
+
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_is_response_ready(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->ack_response(channel);
+}
+
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->ack_request(channel);
+}
+
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_is_request_channel_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size, int *ret)
+{
+ int err;
+
+ if (data && size > 0)
+ tegra_bpmp_mb_read(data, &channel->ib, size);
+
+ err = tegra_bpmp_ack_response(channel);
+ if (err < 0)
+ return err;
+
+ *ret = tegra_bpmp_mb_read_field(&channel->ib, code);
+
+ return 0;
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size, int *ret)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0) {
+ err = index;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size, ret);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+unlock:
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
+ tegra_bpmp_mb_write_field(&channel->ob, flags, flags);
+
+ if (data && size > 0)
+ tegra_bpmp_mb_write(&channel->ob, data, size);
+
+ return tegra_bpmp_post_request(channel);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ channel = &bpmp->threaded_channels[index];
+
+ if (!tegra_bpmp_is_request_channel_free(channel)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0)
+ goto clear_allocated;
+
+ set_bit(index, bpmp->threaded.busy);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+
+clear_allocated:
+ clear_bit(index, bpmp->threaded.allocated);
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ up(&bpmp->threaded.lock);
+
+ return ERR_PTR(err);
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_request_channel_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+static int __maybe_unused tegra_bpmp_resume(struct device *dev);
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ if (bpmp->suspended) {
+ /* Reset BPMP IPC channels during resume based on flags passed */
+ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
+ tegra_bpmp_resume(bpmp->dev);
+ else
+ return -EAGAIN;
+ }
+
+ channel = bpmp->tx_channel;
+
+ spin_lock(&bpmp->atomic_tx_lock);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0) {
+ spin_unlock(&bpmp->atomic_tx_lock);
+ return err;
+ }
+
+ spin_unlock(&bpmp->atomic_tx_lock);
+
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (err < 0)
+ return err;
+
+ err = tegra_bpmp_wait_response(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ if (bpmp->suspended) {
+ /* Reset BPMP IPC channels during resume based on flags passed */
+ if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
+ tegra_bpmp_resume(bpmp->dev);
+ else
+ return -EAGAIN;
+ }
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (err < 0)
+ return err;
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
+void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
+ const void *data, size_t size)
+{
+ unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_bpmp_ack_request(channel);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
+ return;
+
+ tegra_bpmp_mb_write_field(&channel->ob, code, code);
+
+ if (data && size > 0)
+ tegra_bpmp_mb_write(&channel->ob, data, size);
+
+ err = tegra_bpmp_post_response(channel);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (WARN_ON(err < 0))
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+ struct mrq_query_abi_request req = { .mrq = mrq };
+ struct mrq_query_abi_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_ABI,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err || msg.rx.ret)
+ return false;
+
+ return resp.status == 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request request;
+ struct mrq_ping_response response;
+
+ tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request.challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+/* deprecated version of tag query */
+static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ if (size != TAG_SZ)
+ return -EINVAL;
+
+ virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ memcpy(tag, virt, TAG_SZ);
+
+ dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
+ struct mrq_query_fw_tag_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_FW_TAG,
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ if (size != sizeof(resp.tag))
+ return -EINVAL;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+
+ if (err)
+ return err;
+ if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ memcpy(tag, resp.tag, sizeof(resp.tag));
+ return 0;
+ }
+
+ return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = bpmp->rx_channel;
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_is_request_ready(channel)) {
+ unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);
+
+ tegra_bpmp_handle_mrq(bpmp, mrq, channel);
+ }
+
+ spin_lock(&bpmp->lock);
+
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = &bpmp->threaded_channels[i];
+
+ if (tegra_bpmp_is_response_ready(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp *bpmp;
+ char tag[TAG_SZ];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated)
+ return -ENOMEM;
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy)
+ return -ENOMEM;
+
+ spin_lock_init(&bpmp->atomic_tx_lock);
+ bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
+ GFP_KERNEL);
+ if (!bpmp->tx_channel)
+ return -ENOMEM;
+
+ bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
+ GFP_KERNEL);
+ if (!bpmp->rx_channel)
+ return -ENOMEM;
+
+ bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
+ sizeof(*bpmp->threaded_channels),
+ GFP_KERNEL);
+ if (!bpmp->threaded_channels)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bpmp);
+
+ err = bpmp->soc->ops->init(bpmp);
+ if (err < 0)
+ return err;
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto deinit;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
+ err = tegra_bpmp_init_powergates(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_init_debugfs(bpmp);
+ if (err < 0)
+ dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+deinit:
+ if (bpmp->soc->ops->deinit)
+ bpmp->soc->ops->deinit(bpmp);
+
+ return err;
+}
+
+static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
+{
+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
+ bpmp->suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+{
+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
+ bpmp->suspended = false;
+
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_bpmp_pm_ops = {
+ .suspend_noirq = tegra_bpmp_suspend,
+ .resume_noirq = tegra_bpmp_resume,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 3,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 0,
+ .count = 3,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra186_bpmp_ops,
+ .num_resets = 193,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 1,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 4,
+ .count = 1,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 8,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra210_bpmp_ops,
+};
+#endif
+
+static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ .pm = &tegra_bpmp_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_bpmp_probe,
+};
+builtin_platform_driver(tegra_bpmp_driver);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 0000000000..8c9aff9804
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+	 * established state, indicating that it has cleared the counters in our
+ * rx_channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the w_count and r_count and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts: the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
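+
+/*
+ * Editorial annotation (illustrative only, not part of the original driver):
+ * a worked layout example for the header above. With TEGRA_IVC_ALIGN == 64,
+ * the tx union occupies bytes 0..63 (tx.count at offset 0, tx.state at
+ * offset 4, the rest padding) and the rx union occupies bytes 64..127
+ * (rx.count at offset 64), so each end's counters live in their own cache
+ * line and the full header is 128 bytes. tegra_ivc_check_params() below
+ * asserts exactly these alignment properties at build time.
+ */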
+
+#define tegra_ivc_header_read_field(hdr, field) \
+ iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)
+
+#define tegra_ivc_header_write_field(hdr, field, value) \
+ iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with READ_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = tegra_ivc_header_read_field(map, tx.count);
+ u32 rx = tegra_ivc_header_read_field(map, rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
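+
+/*
+ * Editorial annotation (illustrative only, not part of the original driver):
+ * a worked example of the counter arithmetic above. The counters are
+ * free-running u32 values, so the number of pending frames is always
+ * tx - rx in modular arithmetic, even across wraparound: with
+ * num_frames == 4, tx == 0x1 and rx == 0xfffffffe give tx - rx == 3, i.e.
+ * three frames pending. If a corrupted or malicious peer reports
+ * tx - rx == 7 (> num_frames), the check above treats the channel as empty
+ * instead of trusting the value.
+ */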
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
+{
+ u32 tx = tegra_ivc_header_read_field(map, tx.count);
+ u32 rx = tegra_ivc_header_read_field(map, rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
+{
+ u32 tx = tegra_ivc_header_read_field(map, tx.count);
+ u32 rx = tegra_ivc_header_read_field(map, rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);
+
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);
+
+ tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int state;
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
+ if (state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, &ivc->rx.map))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, &ivc->rx.map))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int state;
+
+ state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
+ if (state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, &ivc->tx.map))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, &ivc->tx.map))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
+ unsigned int frame, struct iosys_map *map)
+{
+ size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ if (WARN_ON(frame >= ivc->num_frames))
+ return -EINVAL;
+
+ *map = IOSYS_MAP_INIT_OFFSET(header, offset);
+
+ return 0;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return -EINVAL;
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
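+
+/*
+ * Editorial annotation (illustrative only, not part of the original driver):
+ * a worked trace of a full reset handshake, following the table above.
+ * Endpoint A calls tegra_ivc_reset() and moves to SYNC. B's
+ * tegra_ivc_notified() observes the remote in SYNC, clears its counters and
+ * moves to ACK. A then observes the remote in ACK while itself in SYNC,
+ * clears its counters and moves to ESTABLISHED. Finally B, still in ACK,
+ * observes the remote in ESTABLISHED and moves to ESTABLISHED as well; both
+ * ends may now exchange frames.
+ */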
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state rx_state, tx_state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
+ tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
+
+ if (rx_state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
+ tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (tx_state == TEGRA_IVC_STATE_SYNC &&
+ rx_state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+		 * Order observation of TEGRA_IVC_STATE_SYNC before stores clearing
+		 * tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
+ tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (tx_state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ if (tx_state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
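+
+/*
+ * Editorial annotation (illustrative only, not part of the original driver):
+ * a worked size example. For a channel with 16 frames of 128 bytes, the
+ * queue data is 16 * 128 = 2048 bytes (already 64-byte aligned), and the
+ * total per direction is 2048 + sizeof(struct tegra_ivc_header) =
+ * 2048 + 128 = 2176 bytes. Callers are expected to pass a queue_size that
+ * is already TEGRA_IVC_ALIGN-aligned, typically obtained via
+ * tegra_ivc_align() above.
+ */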
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src)
+{
+ *dst = *src;
+}
+
+static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return (unsigned long)map->vaddr_iomem;
+
+ return (unsigned long)map->vaddr;
+}
+
+static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
+{
+ if (WARN_ON(map->is_iomem))
+ return NULL;
+
+ return map->vaddr;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
+ dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(peer, ivc->rx.phys))
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(peer, ivc->tx.phys)) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ iosys_map_copy(&ivc->rx.map, rx);
+ iosys_map_copy(&ivc->tx.map, tx);
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 0000000000..3b4c9355cb
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,3452 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Nishanth Menon
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message: Transmit message
+ * @rx_len: Receive message length
+ * @xfer_buf: Preallocated buffer to store receive message
+ *		Since we work with a request-ACK protocol, we can
+ * reuse the same buffer for the rx path as we
+ * use for the tx path.
+ * @done: completion event
+ */
+struct ti_sci_xfer {
+ struct ti_msgmgr_message tx_message;
+ u8 rx_len;
+ u8 *xfer_buf;
+ struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count:	Counting semaphore for managing max simultaneous
+ *			messages.
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+ struct semaphore sem_xfer_count;
+ struct ti_sci_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ /* protect transfer allocation */
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @default_host_id: Host identifier representing the compute entity
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+ u8 default_host_id;
+ int max_rx_timeout_ms;
+ int max_msgs;
+ int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @nb: Reboot Notifier block
+ * @d: Debugfs file entry
+ * @debug_region: Memory region where the debug message are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle: Instance of TI SCI handle to send to clients.
+ * @cl: Mailbox Client
+ * @chan_tx: Transmit mailbox channel
+ * @chan_rx: Receive mailbox channel
+ * @minfo: Message info
+ * @node: list head
+ * @host_id: Host ID
+ * @users: Number of users of this instance
+ */
+struct ti_sci_info {
+ struct device *dev;
+ struct notifier_block nb;
+ const struct ti_sci_desc *desc;
+ struct dentry *d;
+ void __iomem *debug_region;
+ char *debug_buffer;
+ size_t debug_region_size;
+ struct ti_sci_handle handle;
+ struct mbox_client cl;
+ struct mbox_chan *chan_tx;
+ struct mbox_chan *chan_rx;
+ struct ti_sci_xfers_info minfo;
+ struct list_head node;
+ u8 host_id;
+ /* protected by ti_sci_list_mutex */
+ int users;
+};
+
+#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s: sequence file pointer
+ * @unused: unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+ struct ti_sci_info *info = s->private;
+
+ memcpy_fromio(info->debug_buffer, info->debug_region,
+ info->debug_region_size);
+ /*
+	 * We don't trust the firmware to leave a NULL-terminated last byte (hence
+	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
+	 * specific data format for debug messages, we just present the data
+	 * in the buffer as-is - we expect the messages to be self-explanatory.
+ */
+ seq_puts(s, info->debug_buffer);
+ return 0;
+}
+
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ char debug_name[50];
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "debug_messages");
+ info->debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->debug_region))
+ return 0;
+ info->debug_region_size = resource_size(res);
+
+ info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+ sizeof(char), GFP_KERNEL);
+ if (!info->debug_buffer)
+ return -ENOMEM;
+ /* Setup NULL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
+ dev_name(dev));
+ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+ &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+ dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+ info->debug_region, info->debug_region_size, res);
+ return 0;
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+ return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev: Device pointer corresponding to the SCI entity
+ * @hdr: pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+ struct ti_sci_msg_hdr *hdr)
+{
+ dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+ hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+ struct device *dev = info->dev;
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_msgmgr_message *mbox_msg = m;
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+ struct ti_sci_xfer *xfer;
+ u8 xfer_id;
+
+ xfer_id = hdr->seq;
+
+ /*
+ * Are we even expecting this?
+ * NOTE: barriers were implicit in locks used for modifying the bitmap
+ */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ /* Is the message of valid length? */
+ if (mbox_msg->len > info->desc->max_msg_size) {
+ dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
+ mbox_msg->len, info->desc->max_msg_size);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+ if (mbox_msg->len < xfer->rx_len) {
+ dev_err(dev, "Recv xfer %zu < expected %d length\n",
+ mbox_msg->len, xfer->rx_len);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+
+ ti_sci_dump_header_dbg(dev, hdr);
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+ complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info: Pointer to SCI entity information
+ * @msg_type: Message type
+ * @msg_flags: Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+ u16 msg_type, u32 msg_flags,
+ size_t tx_message_size,
+ size_t rx_message_size)
+{
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_hdr *hdr;
+ unsigned long flags;
+ unsigned long bit_pos;
+ u8 xfer_id;
+ int ret;
+ int timeout;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_message_size > info->desc->max_msg_size ||
+ tx_message_size > info->desc->max_msg_size ||
+ rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+ return ERR_PTR(-ERANGE);
+
+ /*
+	 * Ensure we have only a controlled number of pending messages.
+	 * Ideally, we might just have to wait for a single message, but be
+	 * conservative and wait 5 times that.
+ */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+ ret = down_timeout(&minfo->sem_xfer_count, timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msgs);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /*
+ * We already ensured in probe that we can have max messages that can
+ * fit in hdr.seq - NOTE: this improves access latencies
+ * to predictable O(1) access, BUT, it opens us to risk if
+ * remote misbehaves with corrupted message sequence responses.
+ * If that happens, we are going to be messed up anyways..
+ */
+ xfer_id = (u8)bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer->tx_message.len = tx_message_size;
+ xfer->tx_message.chan_rx = info->chan_rx;
+ xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
+ xfer->rx_len = (u8)rx_message_size;
+
+ reinit_completion(&xfer->done);
+
+ hdr->seq = xfer_id;
+ hdr->type = msg_type;
+ hdr->host = info->host_id;
+ hdr->flags = msg_flags;
+
+ return xfer;
+}
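+
+/*
+ * Editorial annotation (illustrative only, not part of the original driver):
+ * the message sequence number doubles as the index into minfo->xfer_block.
+ * For example, if bit 3 is the first free bit in xfer_alloc_table, the
+ * transfer uses xfer_block[3] and sends hdr->seq == 3; when the response
+ * arrives, ti_sci_rx_callback() reads hdr->seq from the reply and indexes
+ * xfer_block[3] directly, giving the O(1) lookup noted above.
+ */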
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo: transfer info pointer
+ * @xfer: message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+ struct ti_sci_xfer *xfer)
+{
+ unsigned long flags;
+ struct ti_sci_msg_hdr *hdr;
+ u8 xfer_id;
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer_id = hdr->seq;
+
+ /*
+ * Keep the locked section as small as possible
+ * NOTE: we might escape with smp_mb and no lock here..
+ * but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer_id, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /* Increment the count for the next user to get through */
+ up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info: Pointer to SCI entity information
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, if transmit error,
+ * return corresponding error, else if all goes well,
+ * return 0.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ struct ti_sci_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct device *dev = info->dev;
+ bool done_state = true;
+
+ ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ if (system_state <= SYSTEM_RUNNING) {
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout))
+ ret = -ETIMEDOUT;
+ } else {
+ /*
+ * If we are !running, we cannot use wait_for_completion_timeout
+ * during noirq phase, so we must manually poll the completion.
+ */
+ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+ done_state, 1,
+ info->desc->max_rx_timeout_ms * 1000,
+ false, &xfer->done);
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(info->chan_tx, ret);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info: Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+ struct device *dev = info->dev;
+ struct ti_sci_handle *handle = &info->handle;
+ struct ti_sci_version_info *ver = &handle->version;
+ struct ti_sci_msg_resp_version *rev_info;
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ ver->abi_major = rev_info->abi_major;
+ ver->abi_minor = rev_info->abi_minor;
+ ver->firmware_revision = rev_info->firmware_revision;
+ strncpy(ver->firmware_description, rev_info->firmware_description,
+ sizeof(ver->firmware_description));
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r: pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+ struct ti_sci_msg_hdr *hdr = r;
+
+ return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @flags: flags to setup for the device
+ * @state: State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle: Handle to the device
+ * @id: Device Identifier
+ * @clcnt: Pointer to Context Loss Count
+ * @resets: pointer to resets
+ * @p_state: pointer to p_state
+ * @c_state: pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 *clcnt, u32 *resets,
+ u8 *p_state, u8 *c_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_device_state *req;
+ struct ti_sci_msg_resp_get_device_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!clcnt && !resets && !p_state && !c_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+ req->id = id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (clcnt)
+ *clcnt = resp->context_loss_count;
+ if (resets)
+ *resets = resp->resets;
+ if (p_state)
+ *p_state = resp->programmed_state;
+ if (c_state)
+ *c_state = resp->current_state;
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ * that can be shared with other hosts.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
+ * TISCI that is exclusively owned by the
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
+ * TISCI that is exclusively owned by
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+ u8 unused;
+
+ /* check the device state which will also tell us if the ID is valid */
+ return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @count: Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+ u32 *count)
+{
+ return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state)
+{
+ int ret;
+ u8 state;
+
+ if (!r_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+ if (ret)
+ return ret;
+
+ *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be stopped
+ * @curr_state: true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be ON
+ * @curr_state: true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @curr_state: true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+ bool *curr_state)
+{
+ int ret;
+ u8 state;
+
+ if (!curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+ if (ret)
+ return ret;
+
+ *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 reset_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_resets *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+ req->id = id;
+ req->resets = reset_state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @reset_state: Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 *reset_state)
+{
+ return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+ NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @flags: Header flags as needed
+ * @state: State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->request_state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @programmed_state: State requested for clock to move to
+ * @current_state: State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u8 *programmed_state, u8 *current_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_state *req;
+ struct ti_sci_msg_resp_get_clock_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!programmed_state && !current_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (programmed_state)
+ *programmed_state = resp->programmed_state;
+ if (current_state)
+ *current_state = resp->current_state;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool needs_ssc,
+ bool can_change_freq, bool enable_input_term)
+{
+ u32 flags = 0;
+
+ flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+ flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+ flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+ MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
+ MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
+ MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, bool *req_state)
+{
+ u8 state = 0;
+ int ret;
+
+ if (!req_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+ if (ret)
+ return ret;
+
+ *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u32 parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_parent *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ if (parent_id < 255) {
+ req->parent_id = parent_id;
+ } else {
+ req->parent_id = 255;
+ req->parent_id_32 = parent_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u32 *parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_parent *req;
+ struct ti_sci_msg_resp_get_clock_parent *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !parent_id)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (resp->parent_id < 255)
+ *parent_id = resp->parent_id;
+ else
+ *parent_id = resp->parent_id_32;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u32 *num_parents)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_num_parents *req;
+ struct ti_sci_msg_resp_get_clock_num_parents *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !num_parents)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (resp->num_parents < 255)
+ *num_parents = resp->num_parents;
+ else
+ *num_parents = resp->num_parents_32;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
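+
+/*
+ * Illustrative sketch (not part of this file): the clock parent helpers
+ * above are exposed to clients through clk_ops (see ti_sci_setup_ops()
+ * below); dev_id and clk_id are placeholders:
+ *
+ *	const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
+ *	u32 parent, num_parents;
+ *
+ *	ret = cops->get_parent(handle, dev_id, clk_id, &parent);
+ *	if (!ret)
+ *		ret = cops->get_num_parents(handle, dev_id, clk_id,
+ *					    &num_parents);
+ */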
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @match_freq: Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq,
+ u64 *match_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_query_clock_freq *req;
+ struct ti_sci_msg_resp_query_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !match_freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *match_freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_freq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @freq:	Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 *freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_freq *req;
+ struct ti_sci_msg_resp_get_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
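+
+/*
+ * Illustrative sketch (not part of this file): frequency control is reached
+ * through the same clk_ops, e.g. requesting a rate and reading back what was
+ * actually programmed; min_hz/target_hz/max_hz are placeholders:
+ *
+ *	u64 freq;
+ *
+ *	ret = cops->set_freq(handle, dev_id, clk_id, min_hz, target_hz,
+ *			     max_hz);
+ *	if (!ret)
+ *		ret = cops->get_freq(handle, dev_id, clk_id, &freq);
+ */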
+
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_reboot *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ ret = 0;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ * to a host. Resource is uniquely identified by
+ * type and subtype.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ struct ti_sci_resource_desc *desc)
+{
+ struct ti_sci_msg_resp_get_resource_range *resp;
+ struct ti_sci_msg_req_get_resource_range *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !desc)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+ req->secondary_host = s_host;
+ req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
+ req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else if (!resp->range_num && !resp->range_num_sec) {
+		/* Neither of the two resource ranges is valid */
+ ret = -ENODEV;
+ } else {
+ desc->start = resp->range_start;
+ desc->num = resp->range_num;
+ desc->start_sec = resp->range_start_sec;
+ desc->num_sec = resp->range_num_sec;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
+ *			    that is the same as the TI SCI interface host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype,
+ struct ti_sci_resource_desc *desc)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype,
+ TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+ desc);
+}
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ * assigned to a specified host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ struct ti_sci_resource_desc *desc)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
+}
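+
+/*
+ * Illustrative sketch (not part of this file): a client typically asks for
+ * the resources assigned to its own host through rm_core_ops; dev_id and
+ * subtype are placeholders:
+ *
+ *	struct ti_sci_resource_desc desc;
+ *
+ *	ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
+ *						&desc);
+ *	if (!ret)
+ *		pr_debug("range start %u num %u\n", desc.start, desc.num);
+ */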
+
+/**
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
+ * the requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID to which the irq/event is being
+ * requested for.
+ * @type: Request type irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 src_id, u16 src_index,
+ u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host,
+ u16 type)
+{
+ struct ti_sci_msg_req_manage_irq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->src_id = src_id;
+ req->src_index = src_index;
+ req->dst_id = dst_id;
+ req->dst_host_irq = dst_host_irq;
+ req->ia_id = ia_id;
+ req->vint = vint;
+ req->global_event = global_event;
+ req->vint_status_bit = vint_status_bit;
+ req->secondary_host = s_host;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID to which the irq/event is being
+ * requested for.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper api to free the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID to which the irq/event is being
+ * requested for.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ * source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ * requested source and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+ MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the
+ * requested source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ * and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID |
+ MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
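+
+/*
+ * Illustrative sketch (not part of this file): a direct IRQ route is set up
+ * and torn down through rm_irq_ops; the IDs below are placeholders:
+ *
+ *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
+ *
+ *	ret = iops->set_irq(handle, src_id, src_index, dst_id, dst_host_irq);
+ *	...
+ *	ret = iops->free_irq(handle, src_id, src_index, dst_id, dst_host_irq);
+ */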
+
+/**
+ * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params)
+{
+ struct ti_sci_msg_rm_ring_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->addr_lo = params->addr_lo;
+ req->addr_hi = params->addr_hi;
+ req->count = params->count;
+ req->mode = params->mode;
+ req->size = params->size;
+ req->order_id = params->order_id;
+ req->virtid = params->virtid;
+ req->asel = params->asel;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * pairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_pair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * unpairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_unpair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
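+
+/*
+ * Illustrative sketch (not part of this file): DMA clients pair and unpair
+ * PSI-L threads through rm_psil_ops; nav_id and the thread IDs are
+ * placeholders:
+ *
+ *	const struct ti_sci_rm_psil_ops *psil = &handle->ops.rm_psil_ops;
+ *
+ *	ret = psil->pair(handle, nav_id, src_thread, dst_thread);
+ *	...
+ *	psil->unpair(handle, nav_id, src_thread, dst_thread);
+ */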
+
+/**
+ * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->tx_pause_on_err = params->tx_pause_on_err;
+ req->tx_filt_einfo = params->tx_filt_einfo;
+ req->tx_filt_pswords = params->tx_filt_pswords;
+ req->tx_atype = params->tx_atype;
+ req->tx_chan_type = params->tx_chan_type;
+ req->tx_supr_tdpkt = params->tx_supr_tdpkt;
+ req->tx_fetch_size = params->tx_fetch_size;
+ req->tx_credit_count = params->tx_credit_count;
+ req->txcq_qnum = params->txcq_qnum;
+ req->tx_priority = params->tx_priority;
+ req->tx_qos = params->tx_qos;
+ req->tx_orderid = params->tx_orderid;
+ req->fdepth = params->fdepth;
+ req->tx_sched_priority = params->tx_sched_priority;
+ req->tx_burst_size = params->tx_burst_size;
+ req->tx_tdtype = params->tx_tdtype;
+ req->extended_ch_type = params->extended_ch_type;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->rx_fetch_size = params->rx_fetch_size;
+ req->rxcq_qnum = params->rxcq_qnum;
+ req->rx_priority = params->rx_priority;
+ req->rx_qos = params->rx_qos;
+ req->rx_orderid = params->rx_orderid;
+ req->rx_sched_priority = params->rx_sched_priority;
+ req->flowid_start = params->flowid_start;
+ req->flowid_cnt = params->flowid_cnt;
+ req->rx_pause_on_err = params->rx_pause_on_err;
+ req->rx_atype = params->rx_atype;
+ req->rx_chan_type = params->rx_chan_type;
+ req->rx_ignore_short = params->rx_ignore_short;
+ req->rx_ignore_long = params->rx_ignore_long;
+ req->rx_burst_size = params->rx_burst_size;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->flow_index = params->flow_index;
+ req->rx_einfo_present = params->rx_einfo_present;
+ req->rx_psinfo_present = params->rx_psinfo_present;
+ req->rx_error_handling = params->rx_error_handling;
+ req->rx_desc_type = params->rx_desc_type;
+ req->rx_sop_offset = params->rx_sop_offset;
+ req->rx_dest_qnum = params->rx_dest_qnum;
+ req->rx_src_tag_hi = params->rx_src_tag_hi;
+ req->rx_src_tag_lo = params->rx_src_tag_lo;
+ req->rx_dest_tag_hi = params->rx_dest_tag_hi;
+ req->rx_dest_tag_lo = params->rx_dest_tag_lo;
+ req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
+ req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
+ req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
+ req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
+ req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
+ req->rx_fdq1_qnum = params->rx_fdq1_qnum;
+ req->rx_fdq2_qnum = params->rx_fdq2_qnum;
+ req->rx_fdq3_qnum = params->rx_fdq3_qnum;
+ req->rx_ps_location = params->rx_ps_location;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_request() - Command to request a physical processor control
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_request *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_release() - Command to release a physical processor control
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_release *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_handover() - Command to handover a physical processor
+ * control to a host in the processor's access
+ * control list.
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @host_id: Host ID to get the control of the processor
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
+ u8 proc_id, u8 host_id)
+{
+ struct ti_sci_msg_req_proc_handover *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->host_id = host_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_config() - Command to set the processor boot
+ * configuration flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @bootvector: Processor Boot vector (start address)
+ * @config_flags_set: Configuration flags to be set
+ * @config_flags_clear: Configuration flags to be cleared.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 bootvector,
+ u32 config_flags_set,
+ u32 config_flags_clear)
+{
+ struct ti_sci_msg_req_set_config *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
+ req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
+ TI_SCI_ADDR_HIGH_SHIFT;
+ req->config_flags_set = config_flags_set;
+ req->config_flags_clear = config_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_control() - Command to set the processor boot
+ * control flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @control_flags_set: Control flags to be set
+ * @control_flags_clear: Control flags to be cleared
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
+ u8 proc_id, u32 control_flags_set,
+ u32 control_flags_clear)
+{
+ struct ti_sci_msg_req_set_ctrl *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->control_flags_set = control_flags_set;
+ req->control_flags_clear = control_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @bv: Processor Boot vector (start address)
+ * @cfg_flags: Processor specific configuration flags
+ * @ctrl_flags: Processor specific control flags
+ * @sts_flags: Processor specific status flags
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 *bv, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *sts_flags)
+{
+ struct ti_sci_msg_resp_get_status *resp;
+ struct ti_sci_msg_req_get_status *req;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
+ (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
+ TI_SCI_ADDR_HIGH_MASK);
+ *cfg_flags = resp->config_flags;
+ *ctrl_flags = resp->control_flags;
+ *sts_flags = resp->status_flags;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
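+
+/*
+ * Illustrative sketch (not part of this file): a remoteproc-style client
+ * would drive a processor through proc_ops, e.g. request it, program the
+ * boot vector and release it again; proc_id, bootvector, cfg_set and
+ * cfg_clr are placeholders:
+ *
+ *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
+ *
+ *	ret = pops->request(handle, proc_id);
+ *	if (!ret)
+ *		ret = pops->set_config(handle, proc_id, bootvector,
+ *				       cfg_set, cfg_clr);
+ *	...
+ *	pops->release(handle, proc_id);
+ */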
+
+/*
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info: pointer to TISCI pointer
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+ struct ti_sci_ops *ops = &info->handle.ops;
+ struct ti_sci_core_ops *core_ops = &ops->core_ops;
+ struct ti_sci_dev_ops *dops = &ops->dev_ops;
+ struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+ struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
+ struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
+ struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
+ struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
+ struct ti_sci_proc_ops *pops = &ops->proc_ops;
+
+ core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+ dops->get_device = ti_sci_cmd_get_device;
+ dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
+ dops->idle_device = ti_sci_cmd_idle_device;
+ dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
+ dops->put_device = ti_sci_cmd_put_device;
+
+ dops->is_valid = ti_sci_cmd_dev_is_valid;
+ dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+ dops->is_idle = ti_sci_cmd_dev_is_idle;
+ dops->is_stop = ti_sci_cmd_dev_is_stop;
+ dops->is_on = ti_sci_cmd_dev_is_on;
+ dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+ dops->set_device_resets = ti_sci_cmd_set_device_resets;
+ dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+ cops->get_clock = ti_sci_cmd_get_clock;
+ cops->idle_clock = ti_sci_cmd_idle_clock;
+ cops->put_clock = ti_sci_cmd_put_clock;
+ cops->is_auto = ti_sci_cmd_clk_is_auto;
+ cops->is_on = ti_sci_cmd_clk_is_on;
+ cops->is_off = ti_sci_cmd_clk_is_off;
+
+ cops->set_parent = ti_sci_cmd_clk_set_parent;
+ cops->get_parent = ti_sci_cmd_clk_get_parent;
+ cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+ cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+ cops->set_freq = ti_sci_cmd_clk_set_freq;
+ cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+ rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+ rm_core_ops->get_range_from_shost =
+ ti_sci_cmd_get_resource_range_from_shost;
+
+ iops->set_irq = ti_sci_cmd_set_irq;
+ iops->set_event_map = ti_sci_cmd_set_event_map;
+ iops->free_irq = ti_sci_cmd_free_irq;
+ iops->free_event_map = ti_sci_cmd_free_event_map;
+
+ rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
+
+ psilops->pair = ti_sci_cmd_rm_psil_pair;
+ psilops->unpair = ti_sci_cmd_rm_psil_unpair;
+
+ udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
+ udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
+ udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
+
+ pops->request = ti_sci_cmd_proc_request;
+ pops->release = ti_sci_cmd_proc_release;
+ pops->handover = ti_sci_cmd_proc_handover;
+ pops->set_config = ti_sci_cmd_proc_set_config;
+ pops->set_control = ti_sci_cmd_proc_set_control;
+ pops->get_status = ti_sci_cmd_proc_get_status;
+}
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev: Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ struct device_node *ti_sci_np;
+ struct list_head *p;
+ struct ti_sci_handle *handle = NULL;
+ struct ti_sci_info *info;
+
+ if (!dev) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+ ti_sci_np = of_get_parent(dev->of_node);
+ if (!ti_sci_np) {
+ dev_err(dev, "No OF information\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle: Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol
+ * library. Each ti_sci_put_handle() must be balanced with a successful
+ * ti_sci_get_handle().
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, the error value is returned;
+ * if NULL was passed, -EINVAL is returned.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ mutex_lock(&ti_sci_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+ const struct ti_sci_handle **ptr = res;
+ const struct ti_sci_handle *handle = *ptr;
+ int ret;
+
+ ret = ti_sci_put_handle(handle);
+ if (ret)
+ dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev: device for which we want SCI handle for.
+ *
+ * NOTE: The handle is released automatically once the device resources are
+ * no longer needed; it MUST NOT be released with ti_sci_put_handle().
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol
+ * library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ const struct ti_sci_handle **ptr;
+ const struct ti_sci_handle *handle;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_handle(dev);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
+
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np: device node
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol
+ * library. Each successful ti_sci_get_by_phandle() must be balanced with a
+ * ti_sci_put_handle().
+ *
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handle is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct ti_sci_handle *handle = NULL;
+ struct device_node *ti_sci_np;
+ struct ti_sci_info *info;
+ struct list_head *p;
+
+ if (!np) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ti_sci_np = of_parse_phandle(np, property, 0);
+ if (!ti_sci_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev: Device pointer requesting TISCI handle
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: The handle is released automatically once the device resources are
+ * no longer needed; it MUST NOT be released with ti_sci_put_handle().
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol
+ * library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct ti_sci_handle *handle;
+ const struct ti_sci_handle **ptr;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
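+
+/*
+ * Illustrative sketch only (not part of the driver): a client driver would
+ * typically acquire a handle at probe time and then invoke operations via
+ * handle->ops. The property name "ti,sci" and the dev_id value used below
+ * are hypothetical placeholders.
+ *
+ *	const struct ti_sci_handle *handle;
+ *	int ret;
+ *
+ *	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *
+ *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
+ *	if (ret)
+ *		return ret;
+ */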
+
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: resource number if a free resource was found, else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+ unsigned long flags;
+ u16 set, free_bit;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+ int res_count = desc->num + desc->num_sec;
+
+ free_bit = find_first_zero_bit(desc->res_map, res_count);
+ if (free_bit != res_count) {
+ __set_bit(free_bit, desc->res_map);
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+
+ if (desc->num && free_bit < desc->num)
+ return desc->start + free_bit;
+ else
+ return desc->start_sec + free_bit;
+ }
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+
+ return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ * @id: Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+ unsigned long flags;
+ u16 set;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+
+ if (desc->num && desc->start <= id &&
+ (desc->start + desc->num) > id)
+ __clear_bit(id - desc->start, desc->res_map);
+ else if (desc->num_sec && desc->start_sec <= id &&
+ (desc->start_sec + desc->num_sec) > id)
+ __clear_bit(id - desc->start_sec, desc->res_map);
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
+
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+ u32 set, count = 0;
+
+ for (set = 0; set < res->sets; set++)
+ count += res->desc[set].num + res->desc[set].num_sec;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_resource_sets() - Get TISCI resource sets assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_types: Array of resource sub_types assigned to the device
+ * @sets: Number of sub_types
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+static struct ti_sci_resource *
+devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, u32 *sub_types,
+ u32 sets)
+{
+ struct ti_sci_resource *res;
+ bool valid_set = false;
+ int i, ret, res_count;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ res->sets = sets;
+ res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+ GFP_KERNEL);
+ if (!res->desc)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < res->sets; i++) {
+ ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+ sub_types[i],
+ &res->desc[i]);
+ if (ret) {
+ dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
+ dev_id, sub_types[i]);
+ memset(&res->desc[i], 0, sizeof(res->desc[i]));
+ continue;
+ }
+
+ dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
+ dev_id, sub_types[i], res->desc[i].start,
+ res->desc[i].num, res->desc[i].start_sec,
+ res->desc[i].num_sec);
+
+ valid_set = true;
+ res_count = res->desc[i].num + res->desc[i].num_sec;
+ res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
+ GFP_KERNEL);
+ if (!res->desc[i].res_map)
+ return ERR_PTR(-ENOMEM);
+ }
+ raw_spin_lock_init(&res->lock);
+
+ if (valid_set)
+ return res;
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop: property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 *sub_types;
+ int sets;
+
+ sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(sets);
+ }
+
+ sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
+ if (!sub_types)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
+ res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
+ sets);
+
+ kfree(sub_types);
+ return res;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
+
+/**
+ * devm_ti_sci_get_resource() - Get a resource range assigned to the device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_type: TISCI resource subtype representing the resource.
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type)
+{
+ return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
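+
+/*
+ * Illustrative sketch only (not part of the driver): a client owning a
+ * range of resources described by a DT property could allocate and release
+ * entries from that range as follows. The property name
+ * "ti,sci-rm-range-gp" and the dev_id value are hypothetical placeholders.
+ *
+ *	struct ti_sci_resource *res;
+ *	u16 id;
+ *
+ *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
+ *					  "ti,sci-rm-range-gp");
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ *
+ *	id = ti_sci_get_free_resource(res);
+ *	if (id == TI_SCI_RESOURCE_NULL)
+ *		return -ENOSPC;
+ *	...
+ *	ti_sci_release_resource(res, id);
+ */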
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ const struct ti_sci_handle *handle = &info->handle;
+
+ ti_sci_cmd_core_reboot(handle);
+
+ /* Whether the call fails or succeeds, we should not reach this point */
+ return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ .default_host_id = 2,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 1000,
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 64,
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+ .default_host_id = 12,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 10000,
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 60,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+ {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct ti_sci_desc *desc;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info = NULL;
+ struct ti_sci_xfers_info *minfo;
+ struct mbox_client *cl;
+ int ret = -EINVAL;
+ int i;
+ int reboot = 0;
+ u32 h_id;
+
+ of_id = of_match_device(ti_sci_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
+ /* if the property is not present in DT, use a default from desc */
+ if (ret < 0) {
+ info->host_id = info->desc->default_host_id;
+ } else {
+ if (!h_id) {
+ dev_warn(dev, "Host ID 0 is reserved for firmware\n");
+ info->host_id = info->desc->default_host_id;
+ } else {
+ info->host_id = h_id;
+ }
+ }
+
+ reboot = of_property_read_bool(dev->of_node,
+ "ti,system-reboot-controller");
+ INIT_LIST_HEAD(&info->node);
+ minfo = &info->minfo;
+
+ /*
+ * Pre-allocate messages
+ * NEVER allocate more than what we can indicate in hdr.seq;
+ * if we have a data description bug, force a fix.
+ */
+ if (WARN_ON(desc->max_msgs >=
+ 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+ return -EINVAL;
+
+ minfo->xfer_block = devm_kcalloc(dev,
+ desc->max_msgs,
+ sizeof(*minfo->xfer_block),
+ GFP_KERNEL);
+ if (!minfo->xfer_block)
+ return -ENOMEM;
+
+ minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
+ desc->max_msgs,
+ GFP_KERNEL);
+ if (!minfo->xfer_alloc_table)
+ return -ENOMEM;
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+ xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->xfer_buf)
+ return -ENOMEM;
+
+ xfer->tx_message.buf = xfer->xfer_buf;
+ init_completion(&xfer->done);
+ }
+
+ ret = ti_sci_debugfs_create(pdev, info);
+ if (ret)
+ dev_warn(dev, "Failed to create debug file\n");
+
+ platform_set_drvdata(pdev, info);
+
+ cl = &info->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->rx_callback = ti_sci_rx_callback;
+ cl->knows_txdone = true;
+
+ spin_lock_init(&minfo->xfer_lock);
+ sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+ info->chan_rx = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(info->chan_rx)) {
+ ret = PTR_ERR(info->chan_rx);
+ goto out;
+ }
+
+ info->chan_tx = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(info->chan_tx)) {
+ ret = PTR_ERR(info->chan_tx);
+ goto out;
+ }
+ ret = ti_sci_cmd_get_revision(info);
+ if (ret) {
+ dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+ goto out;
+ }
+
+ ti_sci_setup_ops(info);
+
+ if (reboot) {
+ info->nb.notifier_call = tisci_reboot_handler;
+ info->nb.priority = 128;
+
+ ret = register_restart_handler(&info->nb);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+ goto out;
+ }
+ }
+
+ dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+ info->handle.version.abi_major, info->handle.version.abi_minor,
+ info->handle.version.firmware_revision,
+ info->handle.version.firmware_description);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_add_tail(&info->node, &ti_sci_list);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+ if (!IS_ERR(info->chan_tx))
+ mbox_free_channel(info->chan_tx);
+ if (!IS_ERR(info->chan_rx))
+ mbox_free_channel(info->chan_rx);
+ debugfs_remove(info->d);
+ return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 0000000000..ef3a8214d0
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,1397 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT 0x0000
+#define TI_SCI_MSG_WAKE_RESET 0x0001
+#define TI_SCI_MSG_VERSION 0x0002
+#define TI_SCI_MSG_WAKE_REASON 0x0003
+#define TI_SCI_MSG_GOODBYE 0x0004
+#define TI_SCI_MSG_SYS_RESET 0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ 0x1000
+#define TI_SCI_MSG_FREE_IRQ 0x1001
+
+/* NAVSS resource management */
+/* Ringacc requests */
+#define TI_SCI_MSG_RM_RING_ALLOCATE 0x1100
+#define TI_SCI_MSG_RM_RING_FREE 0x1101
+#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
+#define TI_SCI_MSG_RM_RING_RESET 0x1103
+#define TI_SCI_MSG_RM_RING_CFG 0x1110
+
+/* PSI-L requests */
+#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
+#define TI_SCI_MSG_RM_PSIL_UNPAIR 0x1281
+
+#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC 0x1200
+#define TI_SCI_MSG_RM_UDMAP_TX_FREE 0x1201
+#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC 0x1210
+#define TI_SCI_MSG_RM_UDMAP_RX_FREE 0x1211
+#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG 0x1220
+#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG 0x1221
+
+#define TISCI_MSG_RM_UDMAP_TX_CH_CFG 0x1205
+#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG 0x1206
+#define TISCI_MSG_RM_UDMAP_RX_CH_CFG 0x1215
+#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG 0x1216
+#define TISCI_MSG_RM_UDMAP_FLOW_CFG 0x1230
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG 0x1231
+#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG 0x1232
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG 0x1233
+
+/* Processor Control requests */
+#define TI_SCI_MSG_PROC_REQUEST 0xc000
+#define TI_SCI_MSG_PROC_RELEASE 0xc001
+#define TI_SCI_MSG_PROC_HANDOVER 0xc005
+#define TI_SCI_MSG_SET_CONFIG 0xc100
+#define TI_SCI_MSG_SET_CTRL 0xc101
+#define TI_SCI_MSG_GET_STATUS 0xc400
+
+/**
+ * struct ti_sci_msg_hdr - Generic message header for all messages and responses
+ * @type: Type of messages: One of TI_SCI_MSG* values
+ * @host: Host of the message
+ * @seq: Message identifier indicating a transfer sequence
+ * @flags: Flag for the message
+ */
+struct ti_sci_msg_hdr {
+ u16 type;
+ u8 host;
+ u8 seq;
+#define TI_SCI_MSG_FLAG(val) (1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1)
+ /* Additional Flags */
+ u32 flags;
+} __packed;
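+
+/*
+ * Illustrative sketch only (assumed usage, not part of the protocol
+ * definition): the driver fills a request header roughly as follows before
+ * transmitting a message; the buffer and sequence variables are placeholders.
+ *
+ *	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer_buf;
+ *
+ *	hdr->type = TI_SCI_MSG_VERSION;
+ *	hdr->host = host_id;
+ *	hdr->seq = xfer_id;
+ *	hdr->flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
+ */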
+
+/**
+ * struct ti_sci_msg_resp_version - Response to a TI_SCI_MSG_VERSION request
+ * @hdr: Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision: Firmware revision
+ * @abi_major: Major version of the ABI that firmware supports
+ * @abi_minor: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+ struct ti_sci_msg_hdr hdr;
+ char firmware_description[32];
+ u16 firmware_revision;
+ u8 abi_major;
+ u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr: Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+ struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF 0
+#define MSG_DEVICE_SW_STATE_RETENTION 1
+#define MSG_DEVICE_SW_STATE_ON 2
+ u8 state;
+} __packed;
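+
+/*
+ * Illustrative sketch only (not part of the protocol definition): an
+ * exclusive power-up request combines a header flag with the state field;
+ * the device ID below is a placeholder.
+ *
+ *	req.hdr.flags |= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
+ *			 MSG_FLAG_DEVICE_EXCLUSIVE;
+ *	req.id = 42;
+ *	req.reserved = 0;
+ *	req.state = MSG_DEVICE_SW_STATE_ON;
+ */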
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr: Generic header
+ * @id: Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device state
+ * information.
+ */
+struct ti_sci_msg_req_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr: Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ * driver can use this monotonic counter to determine if the device has
+ * lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state: The state as programmed by set_device.
+ * - Uses the MSG_DEVICE_SW_* macros
+ * @current_state: The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 context_loss_count;
+ u32 resets;
+ u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF 0
+#define MSG_DEVICE_HW_STATE_ON 1
+#define MSG_DEVICE_HW_STATE_TRANS 2
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ * configuration of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ * and usage of the reset flags are device specific. 0 for a bit
+ * indicates releasing the reset represented by that bit while 1
+ * indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr: Generic Header, Certain flags can be set specific to the clocks:
+ * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ * via spread spectrum clocking.
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ * frequency to be changed while it is running so long as it
+ * is within the min/max limits.
+ * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ * is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify. Set to 255 if clock ID is
+ * greater than or equal to 255.
+ * @request_state: Request the state for the clock to be set to.
+ * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ * it can be disabled, regardless of the state of the device
+ * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ * automatically manage the state of this clock. If the device
+ * is enabled, then the clock is enabled. If the device is set
+ * to off or retention, then the clock is internally set as not
+ * being required by the device (default).
+ * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled,
+ * regardless of the state of the device.
+ * @clk_id_32: Clock identifier for the device for this request.
+ * Only to be used if the clock ID is greater than or equal to
+ * 255.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto-managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in other states, the TISCI entity assumes the
+ * remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ 0
+#define MSG_CLOCK_SW_STATE_AUTO 1
+#define MSG_CLOCK_SW_STATE_REQ 2
+ u8 request_state;
+ u32 clk_id_32;
+} __packed;
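+
+/*
+ * Illustrative sketch only (not part of the protocol definition): following
+ * the clk_id / clk_id_32 convention documented above, a clock identifier
+ * that does not fit in the u8 field is passed like this (values are
+ * placeholders):
+ *
+ *	if (clock_id < 255) {
+ *		req.clk_id = clock_id;
+ *	} else {
+ *		req.clk_id = 255;
+ *		req.clk_id_32 = clock_id;
+ *	}
+ */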
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get state of. Set to 255 if the clock
+ * ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier for the device for the request.
+ * Only to be used if the clock ID is greater than or equal to
+ * 255.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr: Generic Header
+ * @programmed_state: The programmed state of the clock. This is one of
+ * MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ * MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY 0
+#define MSG_CLOCK_HW_STATE_READY 1
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify. Set to 255 if clock ID is
+ * greater than or equal to 255.
+ * @parent_id: The new clock parent is selectable by an index via this
+ * parameter. Set to 255 if the parent ID is greater than
+ * or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is 255.
+ * @parent_id_32: Parent identifier if @parent_id is 255.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u8 parent_id;
+ u32 clk_id_32;
+ u32 parent_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get the parent for. If this field
+ * contains 255, the actual clock identifier is stored in
+ * @clk_id_32.
+ * @clk_id_32: Clock identifier if the @clk_id field contains 255.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr: Generic Header
+ * @parent_id: The current clock parent. If set to 255, the current parent
+ * ID can be found from the @parent_id_32 field.
+ * @parent_id_32: Current clock parent if @parent_id field is set to
+ * 255.
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u8 parent_id;
+ u32 parent_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr: Generic header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if the @clk_id field contains 255.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr: Generic header
+ * @num_parents: Number of clock parents. If set to 255, the actual
+ * number of parents is stored into @num_parents_32
+ * field instead.
+ * @num_parents_32: Number of clock parents if @num_parents field is
+ * set to 255.
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u8 num_parents;
+ u32 num_parents_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ * as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock identifier is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the TISCI
+ * entity. For specific requests, TISCI evaluates its capability to achieve the
+ * requested frequency within the provided range and responds with a
+ * result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ * at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the TISCI
+ * entity. For specific requests, TISCI evaluates its capability to achieve the
+ * requested range and responds with a success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by the TISCI
+ * entity. In some cases, clock frequencies are configured by the host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with clock frequency
+ * that the clock is currently at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that the clock is currently on, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ * range of resources.
+ * @hdr: Generic Header
+ * @type: Unique resource assignment type
+ * @subtype: Resource assignment subtype within the resource type.
+ * @secondary_host: Host processing entity to which the resources are
+ * allocated. This is required only when the destination
+ * host ID is different from the TI SCI interface host ID,
+ * else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. Responded with requested
+ * resource range which is of type TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_req_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK GENMASK(5, 0)
+ u16 type;
+ u8 subtype;
+ u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr: Generic Header
+ * @range_start: Start index of the first resource range.
+ * @range_num: Number of resources in the first range.
+ * @range_start_sec: Start index of the second resource range.
+ * @range_num_sec: Number of resources in the second range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+ u16 range_start;
+ u16 range_num;
+ u16 range_start_sec;
+ u16 range_num_sec;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ * between the dev and the host.
+ * @hdr: Generic Header
+ * @valid_params: Bit fields defining the validity of interrupt source
+ * parameters. If a bit is not set, then corresponding
+ * field is not valid and will not be used for route set.
+ * Bit field definitions:
+ * 0 - Valid bit for @dst_id
+ * 1 - Valid bit for @dst_host_irq
+ * 2 - Valid bit for @ia_id
+ * 3 - Valid bit for @vint
+ * 4 - Valid bit for @global_event
+ * 5 - Valid bit for @vint_status_bit_index
+ * 31 - Valid bit for @secondary_host
+ * @src_id: IRQ source peripheral ID.
+ * @src_index: IRQ source index within the peripheral
+ * @dst_id: IRQ Destination ID. Based on the architecture it can be
+ * IRQ controller or host processor ID.
+ * @dst_host_irq: IRQ number of the destination host IRQ controller
+ * @ia_id: Device ID of the interrupt aggregator in which the
+ * vint resides.
+ * @vint: Virtual interrupt number if the interrupt route
+ * is through an interrupt aggregator.
+ * @global_event: Global event that is to be mapped to interrupt
+ * aggregator virtual interrupt status bit.
+ * @vint_status_bit: Virtual interrupt status bit if the interrupt route
+ * utilizes an interrupt aggregator status bit.
+ * @secondary_host: Host ID of the IRQ destination computing entity. This is
+ * required only when the destination host ID is different
+ * from the TI SCI interface host ID.
+ *
+ * Request type is TI_SCI_MSG_SET/RELEASE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID TI_SCI_MSG_FLAG(31)
+ u32 valid_params;
+ u16 src_id;
+ u16 src_index;
+ u16 dst_id;
+ u16 dst_host_irq;
+ u16 ia_id;
+ u16 vint;
+ u16 global_event;
+ u8 vint_status_bit;
+ u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem ring.
+ * @hdr: Generic Header
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ * The ring configuration fields are not valid, and will not be used for
+ * ring configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
+ * 1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
+ * 2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
+ * 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
+ * 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
+ * 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * 6 - Valid bit for @tisci_msg_rm_ring_cfg_req virtid
+ * 7 - Valid bit for @tisci_msg_rm_ring_cfg_req ASEL
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index to be configured.
+ * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
+ * RING_BA_LO register
+ * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
+ * RING_BA_HI register.
+ * @count: Number of ring elements. Must be even for CREDENTIALS or QM
+ * modes.
+ * @mode: Specifies the mode the ring is to be configured.
+ * @size: Specifies encoded ring element size. To calculate the encoded size use
+ * the formula (log2(size_bytes) - 2), where size_bytes cannot be
+ * greater than 256.
+ * @order_id: Specifies the ring's bus order ID.
+ * @virtid: Ring virt ID value
+ * @asel: Ring ASEL (address select) value to be set into the ASEL field of the
+ * ring's RING_BA_HI register.
+ */
+struct ti_sci_msg_rm_ring_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+ u16 virtid;
+ u8 asel;
+} __packed;
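+
+/*
+ * Illustrative sketch only (not part of the protocol definition):
+ * valid_params is built from the bit positions documented above, so a
+ * configuration that programs only the base address, element count, mode
+ * and size would set
+ *
+ *	req.valid_params = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4);
+ *
+ * which marks addr_lo, addr_hi, count, mode and size as valid.
+ */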
+
+/**
+ * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
+ * thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to pair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register programmed with the destination thread if the pairing
+ * is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register programmed with the source thread if the pairing
+ * is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_pair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
+ * destination thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to unpair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_unpair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_udmap_rx_flow_cfg - UDMAP receive flow configuration
+ * message
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_einfo_present: UDMAP receive flow extended packet info present.
+ * @rx_psinfo_present: UDMAP receive flow PS words present.
+ * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
+ * values are TI_SCI_RM_UDMAP_RX_FLOW_ERR_DROP/RETRY.
+ * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
+ * TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST/MONO.
+ * @rx_sop_offset: UDMAP receive flow start of packet offset.
+ * @rx_dest_qnum: UDMAP receive flow destination queue number.
+ * @rx_ps_location: UDMAP receive flow PS words location.
+ * 0 - end of packet descriptor
+ * 1 - Beginning of the data buffer
+ * @rx_src_tag_hi: UDMAP receive flow source tag high byte constant
+ * @rx_src_tag_lo: UDMAP receive flow source tag low byte constant
+ * @rx_dest_tag_hi: UDMAP receive flow destination tag high byte constant
+ * @rx_dest_tag_lo: UDMAP receive flow destination tag low byte constant
+ * @rx_src_tag_hi_sel: UDMAP receive flow source tag high byte selector
+ * @rx_src_tag_lo_sel: UDMAP receive flow source tag low byte selector
+ * @rx_dest_tag_hi_sel: UDMAP receive flow destination tag high byte selector
+ * @rx_dest_tag_lo_sel: UDMAP receive flow destination tag low byte selector
+ * @rx_size_thresh_en: UDMAP receive flow packet size based free buffer queue
+ * enable. If enabled, the ti_sci_rm_udmap_rx_flow_opt_cfg also needs to be
+ * configured and sent.
+ * @rx_fdq0_sz0_qnum: UDMAP receive flow free descriptor queue 0.
+ * @rx_fdq1_qnum: UDMAP receive flow free descriptor queue 1.
+ * @rx_fdq2_qnum: UDMAP receive flow free descriptor queue 2.
+ * @rx_fdq3_qnum: UDMAP receive flow free descriptor queue 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct ti_sci_msg_udmap_rx_flow_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_ps_location;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u8 rx_size_thresh_en;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+} __packed;
+
+/**
+ * struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg - parameters for UDMAP receive
+ * flow optional configuration
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_size_thresh0: UDMAP receive flow packet size threshold 0.
+ * @rx_size_thresh1: UDMAP receive flow packet size threshold 1.
+ * @rx_size_thresh2: UDMAP receive flow packet size threshold 2.
+ * @rx_fdq0_sz1_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 1.
+ * @rx_fdq0_sz2_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 2.
+ * @rx_fdq0_sz3_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u16 rx_size_thresh0;
+ u16 rx_size_thresh1;
+ u16 rx_size_thresh2;
+ u16 rx_fdq0_sz1_qnum;
+ u16 rx_fdq0_sz2_qnum;
+ u16 rx_fdq0_sz3_qnum;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_tx_ch_cfg_req - Configure a Navigator Subsystem
+ * UDMAP transmit channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * transmit channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of tx channel configuration
+ * parameters. The tx channel configuration fields are not valid, and will not
+ * be used for ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err
+ * 1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype
+ * 2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type
+ * 3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size
+ * 4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum
+ * 5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority
+ * 6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos
+ * 7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid
+ * 8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority
+ * 9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo
+ * 10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords
+ * 11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt
+ * 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
+ * 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
+ * 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ * 15 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_tdtype
+ * 16 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::extended_ch_type
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
+ *
+ * @index: UDMAP transmit channel index.
+ *
+ * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to
+ * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG
+ * register.
+ *
+ * @tx_filt_einfo: UDMAP transmit channel extended packet information passing
+ * configuration to be programmed into the tx_filt_einfo field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing
+ * configuration to be programmed into the tx_filt_pswords field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the tx_atype field of
+ * the channel's TCHAN_TCFG register.
+ *
+ * @tx_chan_type: UDMAP transmit channel functional channel type and work
+ * passing mechanism configuration to be programmed into the tx_chan_type
+ * field of the channel's TCHAN_TCFG register.
+ *
+ * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression
+ * configuration to be programmed into the tx_supr_tdpkt field of the channel's
+ * TCHAN_TCFG register.
+ *
+ * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the tx_fetch_size field of the
+ * channel's TCHAN_TCFG register. The user must make sure to set the maximum
+ * word count that can pass through the channel for any allowed descriptor type.
+ *
+ * @tx_credit_count: UDMAP transmit channel transfer request credit count
+ * configuration to be programmed into the count field of the TCHAN_TCREDIT
+ * register. Specifies how many credits for complete TRs are available.
+ *
+ * @txcq_qnum: UDMAP transmit channel completion queue configuration to be
+ * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified
+ * completion queue must be assigned to the host, or a subordinate of the host,
+ * requesting configuration of the transmit channel.
+ *
+ * @tx_priority: UDMAP transmit channel transmit priority value to be programmed
+ * into the priority field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the
+ * qos field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into
+ * the orderid field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed
+ * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of
+ * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP
+ * section of the TRM for restrictions regarding this parameter.
+ *
+ * @tx_sched_priority: UDMAP transmit channel tx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * TCHAN_TST_SCHED register.
+ *
+ * @tx_burst_size: UDMAP transmit channel burst size configuration to be
+ * programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ *
+ * @tx_tdtype: UDMAP transmit channel teardown type configuration to be
+ * programmed into the tdtype field of the TCHAN_TCFG register:
+ * 0 - Return immediately
+ * 1 - Wait for completion message from remote peer
+ *
+ * @extended_ch_type: Valid for BCDMA.
+ * 0 - the channel is split tx channel (tchan)
+ * 1 - the channel is block copy channel (bchan)
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u8 tx_pause_on_err;
+ u8 tx_filt_einfo;
+ u8 tx_filt_pswords;
+ u8 tx_atype;
+ u8 tx_chan_type;
+ u8 tx_supr_tdpkt;
+ u16 tx_fetch_size;
+ u8 tx_credit_count;
+ u16 txcq_qnum;
+ u8 tx_priority;
+ u8 tx_qos;
+ u8 tx_orderid;
+ u16 fdepth;
+ u8 tx_sched_priority;
+ u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_rx_ch_cfg_req - Configure a Navigator Subsystem
+ * UDMAP receive channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * receive channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of rx channel configuration
+ * parameters.
+ * The rx channel configuration fields are not valid, and will not be used for
+ * ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err
+ * 1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype
+ * 2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type
+ * 3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size
+ * 4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum
+ * 5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority
+ * 6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos
+ * 7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid
+ * 8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority
+ * 9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start
+ * 10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt
+ * 11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short
+ * 12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long
+ * 14 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located
+ *
+ * @index: UDMAP receive channel index.
+ *
+ * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the rx_fetch_size field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
+ * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
+ * The specified completion queue must be assigned to the host, or a subordinate
+ * of the host, requesting configuration of the receive channel.
+ *
+ * @rx_priority: UDMAP receive channel receive priority value to be programmed
+ * into the priority field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
+ * qos field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
+ * the orderid field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_sched_priority: UDMAP receive channel rx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * RCHAN_RST_SCHED register.
+ *
+ * @flowid_start: UDMAP receive channel additional flows starting index
+ * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
+ * register. Specifies the starting index for flow IDs the receive channel is to
+ * make use of beyond the default flow. flowid_start and @ref flowid_cnt must be
+ * set as valid and configured together. The starting flow ID set by
+ * flowid_start must be a flow index within the Navigator Subsystem's subset
+ * of flows beyond the default flows statically mapped to receive channels.
+ * The additional flows must be assigned to the host, or a subordinate of the
+ * host, requesting configuration of the receive channel.
+ *
+ * @flowid_cnt: UDMAP receive channel additional flows count configuration to
+ * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
+ * This field specifies how many flow IDs are in the additional contiguous range
+ * of legal flow IDs for the channel. @ref flowid_start and flowid_cnt must be
+ * set as valid and configured together. Disabling the valid_params field bit
+ * for flowid_cnt indicates no flow IDs other than the default are to be
+ * allocated and used by the receive channel. @ref flowid_start plus flowid_cnt
+ * cannot be greater than the number of receive flows in the receive channel's
+ * Navigator Subsystem. The additional flows must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
+ * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
+ * register.
+ *
+ * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the rx_atype field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_chan_type: UDMAP receive channel functional channel type and work passing
+ * mechanism configuration to be programmed into the rx_chan_type field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
+ * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
+ *
+ * @rx_ignore_long: UDMAP receive channel long packet treatment configuration to
+ * be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
+ *
+ * @rx_burst_size: UDMAP receive channel burst size configuration to be
+ * programmed into the rx_burst_size field of the RCHAN_RCFG register.
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u16 rx_fetch_size;
+ u16 rxcq_qnum;
+ u8 rx_priority;
+ u8 rx_qos;
+ u8 rx_orderid;
+ u8 rx_sched_priority;
+ u16 flowid_start;
+ u16 flowid_cnt;
+ u8 rx_pause_on_err;
+ u8 rx_atype;
+ u8 rx_chan_type;
+ u8 rx_ignore_short;
+ u8 rx_ignore_long;
+ u8 rx_burst_size;
+} __packed;
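+
+/*
+ * Example (illustrative only, not part of the TISCI protocol definition):
+ * a caller configuring only pause-on-error, the fetch size and the completion
+ * queue would set valid_params = BIT(0) | BIT(3) | BIT(4) and leave the
+ * remaining fields zeroed; fields whose valid bit is clear are ignored by the
+ * firmware.
+ */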
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * Configuration does not include the flow registers which handle size-based
+ * free descriptor queue routing.
+ *
+ * The flow index must be assigned to the host defined in the TISCI header via
+ * the RM board configuration resource assignment range list.
+ *
+ * @hdr: Standard TISCI header
+ *
+ * @valid_params:
+ * Bitfield defining validity of rx flow configuration parameters. The
+ * rx flow configuration fields are not valid, and will not be used for flow
+ * configuration, if their corresponding valid bit is zero. Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
+ * 1 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
+ * 2 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_error_handling
+ * 3 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_desc_type
+ * 4 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
+ * 5 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
+ * 6 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
+ * 7 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
+ * 8 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
+ * 9 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
+ * 10 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
+ * 11 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
+ * 12 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
+ * 13 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
+ * 14 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
+ * 15 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq1_sz0_qnum
+ * 16 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq2_sz0_qnum
+ * 17 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq3_sz0_qnum
+ * 18 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_ps_location
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow is
+ * allocated
+ *
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ *
+ * @rx_einfo_present:
+ * UDMAP receive flow extended packet info present configuration to be
+ * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_psinfo_present:
+ * UDMAP receive flow PS words present configuration to be programmed into the
+ * rx_psinfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_error_handling:
+ * UDMAP receive flow error handling configuration to be programmed into the
+ * rx_error_handling field of the flow's RFLOW_RFA register.
+ *
+ * @rx_desc_type:
+ * UDMAP receive flow descriptor type configuration to be programmed into the
+ * rx_desc_type field of the flow's RFLOW_RFA register.
+ *
+ * @rx_sop_offset:
+ * UDMAP receive flow start of packet offset configuration to be programmed
+ * into the rx_sop_offset field of the RFLOW_RFA register. See the UDMAP
+ * section of the TRM for more information on this setting. Valid values for
+ * this field are 0-255 bytes.
+ *
+ * @rx_dest_qnum:
+ * UDMAP receive flow destination queue configuration to be programmed into the
+ * rx_dest_qnum field of the flow's RFLOW_RFA register. The specified
+ * destination queue must be valid within the Navigator Subsystem and must be
+ * owned by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_src_tag_hi:
+ * UDMAP receive flow source tag high byte constant configuration to be
+ * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo:
+ * UDMAP receive flow source tag low byte constant configuration to be
+ * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi:
+ * UDMAP receive flow destination tag high byte constant configuration to be
+ * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo:
+ * UDMAP receive flow destination tag low byte constant configuration to be
+ * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_hi_sel:
+ * UDMAP receive flow source tag high byte selector configuration to be
+ * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo_sel:
+ * UDMAP receive flow source tag low byte selector configuration to be
+ * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi_sel:
+ * UDMAP receive flow destination tag high byte selector configuration to be
+ * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo_sel:
+ * UDMAP receive flow destination tag low byte selector configuration to be
+ * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_fdq0_sz0_qnum:
+ * UDMAP receive flow free descriptor queue 0 configuration to be programmed
+ * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq1_qnum:
+ * UDMAP receive flow free descriptor queue 1 configuration to be programmed
+ * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq2_qnum:
+ * UDMAP receive flow free descriptor queue 2 configuration to be programmed
+ * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq3_qnum:
+ * UDMAP receive flow free descriptor queue 3 configuration to be programmed
+ * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_ps_location:
+ * UDMAP receive flow PS words location configuration to be programmed into the
+ * rx_ps_location field of the flow's RFLOW_RFA register.
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 flow_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+ u8 rx_ps_location;
+} __packed;
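+
+/*
+ * Example (illustrative only, not part of the TISCI protocol definition):
+ * a caller configuring just the destination queue and size 0 free descriptor
+ * queue 0 would set valid_params = BIT(5) | BIT(14) and leave the other
+ * fields zeroed so the firmware ignores them.
+ */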
+
+/**
+ * struct ti_sci_msg_req_proc_request - Request a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being requested
+ *
+ * Request type is TI_SCI_MSG_PROC_REQUEST, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_request {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_release - Release a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being released
+ *
+ * Request type is TI_SCI_MSG_PROC_RELEASE, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_release {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_handover - Handover a processor to a host
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being handed over
+ * @host_id: Host ID the control needs to be transferred to
+ *
+ * Request type is TI_SCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_handover {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u8 host_id;
+} __packed;
+
+/* Boot Vector masks */
+#define TI_SCI_ADDR_LOW_MASK GENMASK_ULL(31, 0)
+#define TI_SCI_ADDR_HIGH_MASK GENMASK_ULL(63, 32)
+#define TI_SCI_ADDR_HIGH_SHIFT 32
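+
+/*
+ * Illustrative only: a 64-bit boot vector address is split for the message as
+ *   bootvector_low  = addr & TI_SCI_ADDR_LOW_MASK;
+ *   bootvector_high = (addr & TI_SCI_ADDR_HIGH_MASK) >> TI_SCI_ADDR_HIGH_SHIFT;
+ */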
+
+/**
+ * struct ti_sci_msg_req_set_config - Set Processor boot configuration
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags_set: Optional Processor specific Config Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @config_flags_clear: Optional Processor specific Config Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CONFIG, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_config {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags_set;
+ u32 config_flags_clear;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_ctrl - Set Processor boot control flags
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @control_flags_set: Optional Processor specific Control Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @control_flags_clear: Optional Processor specific Control Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CTRL, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_ctrl {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 control_flags_set;
+ u32 control_flags_clear;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_status - Processor boot status request
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is being requested
+ *
+ * Request type is TI_SCI_MSG_GET_STATUS, response is an appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_status - Processor boot status response
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is returned
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags: Optional Processor specific Config Flags set currently
+ * @control_flags: Optional Processor specific Control Flags set currently
+ * @status_flags: Optional Processor specific Status Flags set currently
+ *
+ * Response structure to a TI_SCI_MSG_GET_STATUS request.
+ */
+struct ti_sci_msg_resp_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags;
+ u32 control_flags;
+ u32 status_flags;
+} __packed;
+
+#endif /* __TI_SCI_H */
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 0000000000..1389fa9418
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+#define TF_CACHE_REENABLE 4
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
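+	/*
+	 * Preserve r4-r11 across the SMC, zero r3 and r4 before the call;
+	 * r3, r12, lr and memory may be clobbered by the secure monitor.
+	 */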
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_NONE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tf_idle_mode = mode;
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 enable_op, l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ switch (tf_idle_mode) {
+ case TF_PM_MODE_LP2:
+ enable_op = TF_CACHE_REENABLE;
+ break;
+
+ default:
+ enable_op = TF_CACHE_ENABLE;
+ break;
+ }
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, enable_op,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
new file mode 100644
index 0000000000..2de0fb139c
--- /dev/null
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox rWTM firmware driver
+ *
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/hw_random.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "turris-mox-rwtm"
+
+/*
+ * The macros and constants below come from Turris Mox's rWTM firmware code.
+ * This firmware is open source and its sources can be found at
+ * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
+ */
+
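+/* Status word layout: bits [31:30] error code, [29:10] value, [9:0] command */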
+#define MBOX_STS_SUCCESS (0 << 30)
+#define MBOX_STS_FAIL (1 << 30)
+#define MBOX_STS_BADCMD (2 << 30)
+#define MBOX_STS_ERROR(s) ((s) & (3 << 30))
+#define MBOX_STS_VALUE(s) (((s) >> 10) & 0xfffff)
+#define MBOX_STS_CMD(s) ((s) & 0x3ff)
+
+enum mbox_cmd {
+ MBOX_CMD_GET_RANDOM = 1,
+ MBOX_CMD_BOARD_INFO = 2,
+ MBOX_CMD_ECDSA_PUB_KEY = 3,
+ MBOX_CMD_HASH = 4,
+ MBOX_CMD_SIGN = 5,
+ MBOX_CMD_VERIFY = 6,
+
+ MBOX_CMD_OTP_READ = 7,
+ MBOX_CMD_OTP_WRITE = 8,
+};
+
+struct mox_kobject;
+
+struct mox_rwtm {
+ struct device *dev;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox;
+ struct mox_kobject *kobj;
+ struct hwrng hwrng;
+
+ struct armada_37xx_rwtm_rx_msg reply;
+
+ void *buf;
+ dma_addr_t buf_phys;
+
+ struct mutex busy;
+ struct completion cmd_done;
+
+ /* board information */
+ int has_board_info;
+ u64 serial_number;
+ int board_version, ram_size;
+ u8 mac_address1[6], mac_address2[6];
+
+ /* public key burned in eFuse */
+ int has_pubkey;
+ u8 pubkey[135];
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Signature process. This is currently done via debugfs, because it
+ * does not conform to the sysfs standard "one file per attribute".
+ * It should be rewritten via crypto API once akcipher API is available
+ * from userspace.
+ */
+ struct dentry *debugfs_root;
+ u32 last_sig[34];
+ int last_sig_done;
+#endif
+};
+
+struct mox_kobject {
+ struct kobject kobj;
+ struct mox_rwtm *rwtm;
+};
+
+static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
+{
+ return &rwtm->kobj->kobj;
+}
+
+static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
+{
+ return container_of(kobj, struct mox_kobject, kobj)->rwtm;
+}
+
+static void mox_kobj_release(struct kobject *kobj)
+{
+ kfree(to_rwtm(kobj)->kobj);
+}
+
+static const struct kobj_type mox_kobj_ktype = {
+ .release = mox_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static int mox_kobj_create(struct mox_rwtm *rwtm)
+{
+ rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
+ if (!rwtm->kobj)
+ return -ENOMEM;
+
+ kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
+ if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
+ kobject_put(rwtm_to_kobj(rwtm));
+ return -ENXIO;
+ }
+
+ rwtm->kobj->rwtm = rwtm;
+
+ return 0;
+}
+
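+/*
+ * Generate a read-only sysfs show callback which returns -ENODATA until the
+ * corresponding information category has been read from the rWTM firmware.
+ */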
+#define MOX_ATTR_RO(name, format, cat) \
+static ssize_t \
+name##_show(struct kobject *kobj, struct kobj_attribute *a, \
+ char *buf) \
+{ \
+ struct mox_rwtm *rwtm = to_rwtm(kobj); \
+ if (!rwtm->has_##cat) \
+ return -ENODATA; \
+ return sprintf(buf, format, rwtm->name); \
+} \
+static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
+
+MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
+MOX_ATTR_RO(board_version, "%i\n", board_info);
+MOX_ATTR_RO(ram_size, "%i\n", board_info);
+MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
+MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
+MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+
+static int mox_get_status(enum mbox_cmd cmd, u32 retval)
+{
+ if (MBOX_STS_CMD(retval) != cmd)
+ return -EIO;
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
+ return -(int)MBOX_STS_VALUE(retval);
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
+ return -ENOSYS;
+ else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+ return -EIO;
+ else
+ return MBOX_STS_VALUE(retval);
+}
+
+static const struct attribute *mox_rwtm_attrs[] = {
+ &mox_attr_serial_number.attr,
+ &mox_attr_board_version.attr,
+ &mox_attr_ram_size.attr,
+ &mox_attr_mac_address1.attr,
+ &mox_attr_mac_address2.attr,
+ &mox_attr_pubkey.attr,
+ NULL
+};
+
+static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+{
+ struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+ struct armada_37xx_rwtm_rx_msg *msg = data;
+
+ rwtm->reply = *msg;
+ complete(&rwtm->cmd_done);
+}
+
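+/*
+ * The firmware packs a MAC address into two 32-bit words: the low 16 bits of
+ * the first word hold bytes 0-1, the second word holds bytes 2-5.
+ */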
+static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
+{
+ mac[0] = t1 >> 8;
+ mac[1] = t1;
+ mac[2] = t2 >> 24;
+ mac[3] = t2 >> 16;
+ mac[4] = t2 >> 8;
+ mac[5] = t2;
+}
+
+static int mox_get_board_info(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ int ret;
+
+ msg.command = MBOX_CMD_BOARD_INFO;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+		return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ if (ret == -ENODATA) {
+ dev_warn(rwtm->dev,
+ "Board does not have manufacturing information burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the BOARD_INFO command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ rwtm->serial_number = reply->status[1];
+ rwtm->serial_number <<= 32;
+ rwtm->serial_number |= reply->status[0];
+ rwtm->board_version = reply->status[2];
+ rwtm->ram_size = reply->status[3];
+ reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
+ reply->status[5]);
+ reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
+ reply->status[7]);
+ rwtm->has_board_info = 1;
+
+ pr_info("Turris Mox serial number %016llX\n",
+ rwtm->serial_number);
+ pr_info(" board version %i\n", rwtm->board_version);
+ pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
+ }
+
+ msg.command = MBOX_CMD_ECDSA_PUB_KEY;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+		return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ if (ret == -ENODATA) {
+ dev_warn(rwtm->dev, "Board has no public key burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ u32 *s = reply->status;
+
+ rwtm->has_pubkey = 1;
+ sprintf(rwtm->pubkey,
+ "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
+ ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
+ s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+ }
+
+ return 0;
+}
+
+static int check_get_random_support(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
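+	/* Probe for support by requesting 4 bytes of entropy into the DMA buffer */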
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = 4;
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+		return -ETIMEDOUT;
+
+ return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+}
+
+static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
+ if (max > 4096)
+ max = 4096;
+
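+	/* Request the clamped length, rounded up to a whole number of 32-bit words */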
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = (max + 3) & ~3;
+
+ if (!wait) {
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+ } else {
+ mutex_lock(&rwtm->busy);
+ }
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ memcpy(data, rwtm->buf, max);
+ ret = max;
+
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rwtm_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ ssize_t ret;
+
+ /* only allow one read, of 136 bytes, from position 0 */
+ if (*ppos != 0)
+ return 0;
+
+ if (len < 136)
+ return -EINVAL;
+
+ if (!rwtm->last_sig_done)
+ return -ENODATA;
+
+ /* 2 arrays of 17 32-bit words are 136 bytes */
+ ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
+ rwtm->last_sig_done = 0;
+
+ return ret;
+}
+
+static ssize_t do_sign_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct armada_37xx_rwtm_tx_msg msg;
+ loff_t dummy = 0;
+ ssize_t ret;
+
+ /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
+ if (len != 64)
+ return -EINVAL;
+
+	/* refuse a new request until the previous signature has been read */
+ if (rwtm->last_sig_done)
+ return -EBUSY;
+
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+
+ /*
+ * Here we have to send:
+ * 1. Address of the input to sign.
+	 *    The input is an array of 17 32-bit words, the first (most
+	 *    significant) is 0, and the remaining 16 words are copied from the
+	 *    SHA-512 hash given by the user and converted from BE to LE.
+ * 2. Address of the buffer where ECDSA signature value R shall be
+ * stored by the rWTM firmware.
+ * 3. Address of the buffer where ECDSA signature value S shall be
+ * stored by the rWTM firmware.
+ */
+ memset(rwtm->buf, 0, 4);
+ ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
+ if (ret < 0)
+ goto unlock_mutex;
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
+
+ msg.command = MBOX_CMD_SIGN;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = rwtm->buf_phys + 68;
+ msg.args[3] = rwtm->buf_phys + 2 * 68;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = MBOX_STS_VALUE(reply->retval);
+ if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
+ goto unlock_mutex;
+
+ /*
+ * Here we read the R and S values of the ECDSA signature
+ * computed by the rWTM firmware and convert their words from
+ * LE to BE.
+ */
+ memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
+ cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
+ rwtm->last_sig_done = 1;
+
+ mutex_unlock(&rwtm->busy);
+ return len;
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+static const struct file_operations do_sign_fops = {
+ .owner = THIS_MODULE,
+ .open = rwtm_debug_open,
+ .read = do_sign_read,
+ .write = do_sign_write,
+ .llseek = no_llseek,
+};
+
+static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("turris-mox-rwtm", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
+ &do_sign_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ rwtm->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+ debugfs_remove_recursive(rwtm->debugfs_root);
+}
+#else
+static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ return 0;
+}
+
+static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+}
+#endif
+
+static int turris_mox_rwtm_probe(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
+ if (!rwtm)
+ return -ENOMEM;
+
+ rwtm->dev = dev;
+ rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
+ GFP_KERNEL);
+ if (!rwtm->buf)
+ return -ENOMEM;
+
+ ret = mox_kobj_create(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
+ return ret;
+ }
+
+ ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create sysfs files!\n");
+ goto put_kobj;
+ }
+
+ platform_set_drvdata(pdev, rwtm);
+
+ mutex_init(&rwtm->busy);
+
+ rwtm->mbox_client.dev = dev;
+ rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+
+ rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
+ if (IS_ERR(rwtm->mbox)) {
+ ret = PTR_ERR(rwtm->mbox);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot request mailbox channel: %i\n",
+ ret);
+ goto remove_files;
+ }
+
+ init_completion(&rwtm->cmd_done);
+
+ ret = mox_get_board_info(rwtm);
+ if (ret < 0)
+ dev_warn(dev, "Cannot read board information: %i\n", ret);
+
+ ret = check_get_random_support(rwtm);
+ if (ret < 0) {
+ dev_notice(dev,
+ "Firmware does not support the GET_RANDOM command\n");
+ goto free_channel;
+ }
+
+ rwtm->hwrng.name = DRIVER_NAME "_hwrng";
+ rwtm->hwrng.read = mox_hwrng_read;
+ rwtm->hwrng.priv = (unsigned long) rwtm;
+
+ ret = devm_hwrng_register(dev, &rwtm->hwrng);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register HWRNG: %i\n", ret);
+ goto free_channel;
+ }
+
+ ret = rwtm_register_debugfs(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
+ goto free_channel;
+ }
+
+ dev_info(dev, "HWRNG successfully registered\n");
+
+ return 0;
+
+free_channel:
+ mbox_free_channel(rwtm->mbox);
+remove_files:
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+put_kobj:
+ kobject_put(rwtm_to_kobj(rwtm));
+ return ret;
+}
+
+static int turris_mox_rwtm_remove(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+
+ rwtm_unregister_debugfs(rwtm);
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ kobject_put(rwtm_to_kobj(rwtm));
+ mbox_free_channel(rwtm->mbox);
+
+ return 0;
+}
+
+static const struct of_device_id turris_mox_rwtm_match[] = {
+ { .compatible = "cznic,turris-mox-rwtm", },
+ { .compatible = "marvell,armada-3700-rwtm-firmware", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
+
+static struct platform_driver turris_mox_rwtm_driver = {
+ .probe = turris_mox_rwtm_probe,
+ .remove = turris_mox_rwtm_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = turris_mox_rwtm_match,
+ },
+};
+module_platform_driver(turris_mox_rwtm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
new file mode 100644
index 0000000000..9a9bd19088
--- /dev/null
+++ b/drivers/firmware/xilinx/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+# Kconfig for Xilinx firmwares
+
+menu "Zynq MPSoC Firmware Drivers"
+ depends on ARCH_ZYNQMP
+
+config ZYNQMP_FIRMWARE
+ bool "Enable Xilinx Zynq MPSoC firmware interface"
+ depends on ARCH_ZYNQMP
+ default y if ARCH_ZYNQMP
+ select MFD_CORE
+ help
+ Firmware interface driver is used by different
+ drivers to communicate with the firmware for
+ various platform management services.
+ Say yes to enable ZynqMP firmware interface driver.
+ If in doubt, say N.
+
+config ZYNQMP_FIRMWARE_DEBUG
+ bool "Enable Xilinx Zynq MPSoC firmware debug APIs"
+ depends on ZYNQMP_FIRMWARE && DEBUG_FS
+ help
+ Say yes to enable ZynqMP firmware interface debug APIs.
+ If in doubt, say N.
+
+endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
new file mode 100644
index 0000000000..875a53703c
--- /dev/null
+++ b/drivers/firmware/xilinx/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Xilinx firmwares
+
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
new file mode 100644
index 0000000000..8528850af8
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer for debugfs APIs
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include "zynqmp-debug.h"
+
+#define PM_API_NAME_LEN 50
+
+struct pm_api_info {
+ u32 api_id;
+ char api_name[PM_API_NAME_LEN];
+ char api_name_len;
+};
+
+static char debugfs_buf[PAGE_SIZE];
+
+#define PM_API(id) {id, #id, strlen(#id)}
+static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_GET_API_VERSION),
+ PM_API(PM_QUERY_DATA),
+};
+
+static struct dentry *firmware_debugfs_root;
+
+/**
+ * zynqmp_pm_argument_value() - Extract argument value from a PM-API request
+ * @arg: Entered PM-API argument in string format
+ *
+ * Return: Argument value in unsigned integer format on success
+ * 0 otherwise
+ */
+static u64 zynqmp_pm_argument_value(char *arg)
+{
+ u64 value;
+
+ if (!arg)
+ return 0;
+
+ if (!kstrtou64(arg, 0, &value))
+ return value;
+
+ return 0;
+}
+
+/**
+ * get_pm_api_id() - Extract API-ID from a PM-API request
+ * @pm_api_req: Entered PM-API argument in string format
+ * @pm_id: API-ID
+ *
+ * Return: 0 on success else error code
+ */
+static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm_api_list) ; i++) {
+ if (!strncasecmp(pm_api_req, pm_api_list[i].api_name,
+ pm_api_list[i].api_name_len)) {
+ *pm_id = pm_api_list[i].api_id;
+ break;
+ }
+ }
+
+ /* If no name was entered look for PM-API ID instead */
+ if (i == ARRAY_SIZE(pm_api_list) && kstrtouint(pm_api_req, 10, pm_id))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
+{
+ u32 pm_api_version;
+ int ret;
+ struct zynqmp_pm_query_data qdata = {0};
+
+ switch (pm_id) {
+ case PM_GET_API_VERSION:
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
+ pm_api_version >> 16, pm_api_version & 0xffff);
+ break;
+ case PM_QUERY_DATA:
+ qdata.qid = pm_api_arg[0];
+ qdata.arg1 = pm_api_arg[1];
+ qdata.arg2 = pm_api_arg[2];
+ qdata.arg3 = pm_api_arg[3];
+
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
+ if (ret)
+ break;
+
+ switch (qdata.qid) {
+ case PM_QID_CLOCK_GET_NAME:
+ sprintf(debugfs_buf, "Clock name = %s\n",
+ (char *)pm_api_ret);
+ break;
+ case PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS:
+ sprintf(debugfs_buf, "Multiplier = %d, Divider = %d\n",
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ default:
+ sprintf(debugfs_buf,
+ "data[0] = 0x%08x\ndata[1] = 0x%08x\n data[2] = 0x%08x\ndata[3] = 0x%08x\n",
+ pm_api_ret[0], pm_api_ret[1],
+ pm_api_ret[2], pm_api_ret[3]);
+ }
+ break;
+ default:
+ sprintf(debugfs_buf, "Unsupported PM-API request\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_debugfs_api_write() - debugfs write function
+ * @file: User file
+ * @ptr: User entered PM-API string
+ * @len: Length of the userspace buffer
+ * @off: Offset within the file
+ *
+ * Used for triggering pm api functions by writing
+ * echo <pm_api_id> > /sys/kernel/debug/zynqmp-firmware/pm or
+ * echo <pm_api_name> > /sys/kernel/debug/zynqmp-firmware/pm
+ *
+ * Return: Number of bytes copied if PM-API request succeeds,
+ * the corresponding error code otherwise
+ */
+static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
+ const char __user *ptr, size_t len,
+ loff_t *off)
+{
+ char *kern_buff, *tmp_buff;
+ char *pm_api_req;
+ u32 pm_id = 0;
+ u64 pm_api_arg[4] = {0, 0, 0, 0};
+ /* Return values from PM APIs calls */
+ u32 pm_api_ret[4] = {0, 0, 0, 0};
+
+ int ret;
+ int i = 0;
+
+ strcpy(debugfs_buf, "");
+
+ if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ kern_buff = memdup_user_nul(ptr, len);
+ if (IS_ERR(kern_buff))
+ return PTR_ERR(kern_buff);
+ tmp_buff = kern_buff;
+
+ /* Read the API name from a user request */
+ pm_api_req = strsep(&kern_buff, " ");
+
+ ret = get_pm_api_id(pm_api_req, &pm_id);
+ if (ret < 0)
+ goto err;
+
+ /* Read node_id and arguments from the PM-API request */
+ pm_api_req = strsep(&kern_buff, " ");
+ while ((i < ARRAY_SIZE(pm_api_arg)) && pm_api_req) {
+ pm_api_arg[i++] = zynqmp_pm_argument_value(pm_api_req);
+ pm_api_req = strsep(&kern_buff, " ");
+ }
+
+ ret = process_api_request(pm_id, pm_api_arg, pm_api_ret);
+
+err:
+ kfree(tmp_buff);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/**
+ * zynqmp_pm_debugfs_api_read() - debugfs read function
+ * @file: User file
+ * @ptr: Requested pm_api_version string
+ * @len: Length of the userspace buffer
+ * @off: Offset within the file
+ *
+ * Return: Length of the version string on success
+ * else error code
+ */
+static ssize_t zynqmp_pm_debugfs_api_read(struct file *file, char __user *ptr,
+ size_t len, loff_t *off)
+{
+ return simple_read_from_buffer(ptr, len, off, debugfs_buf,
+ strlen(debugfs_buf));
+}
+
+/* Setup debugfs fops */
+static const struct file_operations fops_zynqmp_pm_dbgfs = {
+ .owner = THIS_MODULE,
+ .write = zynqmp_pm_debugfs_api_write,
+ .read = zynqmp_pm_debugfs_api_read,
+};
+
+/**
+ * zynqmp_pm_api_debugfs_init - Initialize debugfs interface
+ *
+ * Return: None
+ */
+void zynqmp_pm_api_debugfs_init(void)
+{
+ /* Initialize debugfs interface */
+ firmware_debugfs_root = debugfs_create_dir("zynqmp-firmware", NULL);
+ debugfs_create_file("pm", 0660, firmware_debugfs_root, NULL,
+ &fops_zynqmp_pm_dbgfs);
+}
+
+/**
+ * zynqmp_pm_api_debugfs_exit - Remove debugfs interface
+ *
+ * Return: None
+ */
+void zynqmp_pm_api_debugfs_exit(void)
+{
+ debugfs_remove_recursive(firmware_debugfs_root);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-debug.h b/drivers/firmware/xilinx/zynqmp-debug.h
new file mode 100644
index 0000000000..e1515a93e9
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx
+ *
+ * Michal Simek <michal.simek@amd.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_DEBUG_H__
+#define __FIRMWARE_ZYNQMP_DEBUG_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE_DEBUG)
+void zynqmp_pm_api_debugfs_init(void);
+void zynqmp_pm_api_debugfs_exit(void);
+#else
+static inline void zynqmp_pm_api_debugfs_init(void) { }
+static inline void zynqmp_pm_api_debugfs_exit(void) { }
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_DEBUG_H__ */
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
new file mode 100644
index 0000000000..4cc1ac7f76
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -0,0 +1,2042 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2022 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/hashtable.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
+#include "zynqmp-debug.h"
+
+/* Max HashMap Order for PM API feature check (1<<7 = 128) */
+#define PM_API_FEATURE_CHECK_MAX_ORDER 7
+
+/* CRL registers and bitfields */
+#define CRL_APB_BASE 0xFF5E0000U
+/* BOOT_PIN_CTRL- Used to control the mode pins after boot */
+#define CRL_APB_BOOT_PIN_CTRL (CRL_APB_BASE + (0x250U))
+/* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
+#define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU
+
+/* IOCTL/QUERY feature payload size */
+#define FEATURE_PAYLOAD_SIZE 2
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK GENMASK(15, 0)
+
+static bool feature_check_enabled;
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
+static u32 query_features[FEATURE_PAYLOAD_SIZE];
+
+static struct platform_device *em_dev;
+
+/**
+ * struct zynqmp_devinfo - Structure for Zynqmp device instance
+ * @dev: Device Pointer
+ * @feature_conf_id: Feature conf id
+ */
+struct zynqmp_devinfo {
+ struct device *dev;
+ u32 feature_conf_id;
+};
+
+/**
+ * struct pm_api_feature_data - PM API Feature data
+ * @pm_api_id: PM API Id, used as key to index into hashmap
+ * @feature_status: status of PM API feature: valid, invalid
+ * @hentry: hlist_node that hooks this entry into hashtable
+ */
+struct pm_api_feature_data {
+ u32 pm_api_id;
+ int feature_status;
+ struct hlist_node hentry;
+};
+
+static const struct mfd_cell firmware_devs[] = {
+ {
+ .name = "zynqmp_power_controller",
+ },
+};
+
+/**
+ * zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
+ * @ret_status: PMUFW return code
+ *
+ * Return: corresponding Linux error code
+ */
+static int zynqmp_pm_ret_code(u32 ret_status)
+{
+ switch (ret_status) {
+ case XST_PM_SUCCESS:
+ case XST_PM_DOUBLE_REQ:
+ return 0;
+ case XST_PM_NO_FEATURE:
+ return -ENOTSUPP;
+ case XST_PM_NO_ACCESS:
+ return -EACCES;
+ case XST_PM_ABORT_SUSPEND:
+ return -ECANCELED;
+ case XST_PM_MULT_USER:
+ return -EUSERS;
+ case XST_PM_INTERNAL:
+ case XST_PM_CONFLICT:
+ case XST_PM_INVALID_NODE:
+ default:
+ return -EINVAL;
+ }
+}
+
+static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ return -ENODEV;
+}
+
+/*
+ * PM function call wrapper
+ * Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
+ */
+static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
+
+/**
+ * do_fw_call_smc() - Call system-level platform management layer (SMC)
+ * @arg0: Argument 0 to SMC call
+ * @arg1: Argument 1 to SMC call
+ * @arg2: Argument 2 to SMC call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function via SMC call (no hypervisor present).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
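+	/* Split the two 64-bit result registers into four 32-bit payload words */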
+ if (ret_payload) {
+ ret_payload[0] = lower_32_bits(res.a0);
+ ret_payload[1] = upper_32_bits(res.a0);
+ ret_payload[2] = lower_32_bits(res.a1);
+ ret_payload[3] = upper_32_bits(res.a1);
+ }
+
+ return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+/**
+ * do_fw_call_hvc() - Call system-level platform management layer (HVC)
+ * @arg0: Argument 0 to HVC call
+ * @arg1: Argument 1 to HVC call
+ * @arg2: Argument 2 to HVC call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function via an HVC call. HVC-based
+ * communication goes through the hypervisor (no direct communication
+ * with ATF).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
+ if (ret_payload) {
+ ret_payload[0] = lower_32_bits(res.a0);
+ ret_payload[1] = upper_32_bits(res.a0);
+ ret_payload[2] = lower_32_bits(res.a1);
+ ret_payload[3] = upper_32_bits(res.a1);
+ }
+
+ return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
+{
+ int ret;
+ u64 smc_arg[2];
+
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ else
+ ret = ret_payload[1];
+
+ return ret;
+}
+
+static int do_feature_check_call(const u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ struct pm_api_feature_data *feature_data;
+
+ /* Check for existing entry in hash table for given api */
+ hash_for_each_possible(pm_api_features_map, feature_data, hentry,
+ api_id) {
+ if (feature_data->pm_api_id == api_id)
+ return feature_data->feature_status;
+ }
+
+ /* Add new entry if not present */
+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
+ if (!feature_data)
+ return -ENOMEM;
+
+ feature_data->pm_api_id = api_id;
+ ret = __do_feature_check_call(api_id, ret_payload);
+
+ feature_data->feature_status = ret;
+ hash_add(pm_api_features_map, &feature_data->hentry, api_id);
+
+ if (api_id == PM_IOCTL)
+ /* Store supported IOCTL IDs mask */
+ memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+ else if (api_id == PM_QUERY_DATA)
+ /* Store supported QUERY IDs mask */
+ memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_feature() - Check whether given feature is supported or not and
+ * store supported IOCTL/QUERY ID mask
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_feature(const u32 api_id)
+{
+ int ret;
+
+ if (!feature_check_enabled)
+ return 0;
+
+ ret = do_feature_check_call(api_id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
+
+/**
+ * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
+ * is supported or not
+ * @api_id: PM_IOCTL or PM_QUERY_DATA
+ * @id: IOCTL or QUERY function IDs
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ int ret;
+ u32 *bit_mask;
+
+ /* Input arguments validation */
+ if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
+ return -EINVAL;
+
+ /* Check feature check API version */
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret < 0)
+ return ret;
+
+ /* Check if feature check version 2 is supported or not */
+ if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
+ /*
+ * Call feature check for IOCTL/QUERY API to get IOCTL ID or
+ * QUERY ID feature status.
+ */
+ ret = do_feature_check_call(api_id);
+ if (ret < 0)
+ return ret;
+
+ bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
+
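+		/* Each 32-bit word of the feature mask covers 32 IDs; test this ID's bit */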
+ if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
+ return -EOPNOTSUPP;
+ } else {
+ return -ENODATA;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
+
+/**
+ * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
+ * caller function depending on the configuration
+ * @pm_api_id: Requested PM-API call
+ * @arg0: Argument 0 to requested PM-API call
+ * @arg1: Argument 1 to requested PM-API call
+ * @arg2: Argument 2 to requested PM-API call
+ * @arg3: Argument 3 to requested PM-API call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following SMC Calling Convention (SMCCC) for SMC64:
+ * PM Function Identifier:
+ * PM_SIP_SVC + PM_API_ID =
+ *   ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
+ *    (SMC_64 << FUNCID_CC_SHIFT) |
+ *    (SIP_START << FUNCID_OEN_SHIFT) |
+ *    (PM_API_ID & FUNCID_NUM_MASK))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PM_API_ID - Platform Management API ID.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 *ret_payload)
+{
+	/*
+	 * The SiP service call Function Identifier (PM_SIP_SVC | pm_api_id)
+	 * must be passed in register x0.
+	 */
+ u64 smc_arg[4];
+ int ret;
+
+ /* Check if feature is supported or not */
+ ret = zynqmp_pm_feature(pm_api_id);
+ if (ret < 0)
+ return ret;
+
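+	/* Pack two 32-bit arguments into each 64-bit SMC argument register */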
+ smc_arg[0] = PM_SIP_SVC | pm_api_id;
+ smc_arg[1] = ((u64)arg1 << 32) | arg0;
+ smc_arg[2] = ((u64)arg3 << 32) | arg2;
+
+ return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
+}
+
+static u32 pm_api_version;
+static u32 pm_tz_version;
+static u32 pm_family_code;
+static u32 pm_sub_family_code;
+
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
+ NULL);
+ if (!ret)
+ return ret;
+
+ /* try old implementation as fallback strategy if above fails */
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+ reset, NULL);
+}
+
+/**
+ * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_api_version(u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!version)
+ return -EINVAL;
+
+	/* Check if the PM API version has already been retrieved */
+ if (pm_api_version > 0) {
+ *version = pm_api_version;
+ return 0;
+ }
+ ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
+ *version = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
+
+/**
+ * zynqmp_pm_get_chipid - Get silicon ID registers
+ * @idcode: IDCODE register
+ * @version: version register
+ *
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
+ */
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!idcode || !version)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ *idcode = ret_payload[1];
+ *version = ret_payload[2];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
+
+/**
+ * zynqmp_pm_get_family_info() - Get family info of platform
+ * @family: Returned family code value
+ * @subfamily: Returned sub-family code value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 idcode;
+ int ret;
+
+	/* Check if the family and sub-family codes have already been retrieved */
+ if (pm_family_code && pm_sub_family_code) {
+ *family = pm_family_code;
+ *subfamily = pm_sub_family_code;
+ return 0;
+ }
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ if (ret < 0)
+ return ret;
+
+ idcode = ret_payload[1];
+ pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
+ pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
+ *family = pm_family_code;
+ *subfamily = pm_sub_family_code;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_trustzone_version(u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!version)
+ return -EINVAL;
+
+	/* Check if the PM TrustZone version has already been retrieved */
+ if (pm_tz_version > 0) {
+ *version = pm_tz_version;
+ return 0;
+ }
+ ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
+ 0, 0, ret_payload);
+ *version = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * get_set_conduit_method() - Choose SMC or HVC based communication
+ * @np: Pointer to the device_node structure
+ *
+ * Use SMC or HVC-based functions to communicate with EL2/EL3.
+ *
+ * Return: Returns 0 on success or error code
+ */
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("%s missing \"method\" property\n", __func__);
+ return -ENXIO;
+ }
+
+ if (!strcmp("hvc", method)) {
+ do_fw_call = do_fw_call_hvc;
+ } else if (!strcmp("smc", method)) {
+ do_fw_call = do_fw_call_smc;
+ } else {
+ pr_warn("%s Invalid \"method\" property: %s\n",
+ __func__, method);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pm_query_data() - Get query data from firmware
+ * @qdata: Variable to the zynqmp_pm_query_data structure
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+{
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
+ qdata.arg2, qdata.arg3, out);
+
+ /*
+ * For clock name query, all bytes in SMC response are clock name
+ * characters and return code is always success. For invalid clocks,
+ * clock name bytes would be zeros.
+ */
+ return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
+
+/**
+ * zynqmp_pm_clock_enable() - Enable the clock for given id
+ * @clock_id: ID of the clock to be enabled
+ *
+ * This function is used by master to enable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
+
+/**
+ * zynqmp_pm_clock_disable() - Disable the clock for given id
+ * @clock_id: ID of the clock to be disabled
+ *
+ * This function is used by master to disable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
+
+/**
+ * zynqmp_pm_clock_getstate() - Get the clock state for given id
+ * @clock_id: ID of the clock to be queried
+ * @state: 1/0 (Enabled/Disabled)
+ *
+ * This function is used by master to get the state of clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
+ 0, 0, ret_payload);
+ *state = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
+
+/**
+ * zynqmp_pm_clock_setdivider() - Set the clock divider for given id
+ * @clock_id: ID of the clock
+ * @divider: divider value
+ *
+ * This function is used by master to set divider for any clock
+ * to achieve desired rate.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
+
+/**
+ * zynqmp_pm_clock_getdivider() - Get the clock divider for given id
+ * @clock_id: ID of the clock
+ * @divider: divider value
+ *
+ * This function is used by master to get divider values
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
+ 0, 0, ret_payload);
+ *divider = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
+
+/**
+ * zynqmp_pm_clock_setrate() - Set the clock rate for given id
+ * @clock_id: ID of the clock
+ * @rate: rate value in Hz
+ *
+ * This function is used by master to set rate for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
+ lower_32_bits(rate),
+ upper_32_bits(rate),
+ 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
+
+/**
+ * zynqmp_pm_clock_getrate() - Get the clock rate for given id
+ * @clock_id: ID of the clock
+ * @rate: rate value in Hz
+ *
+ * This function is used by master to get rate
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
+ 0, 0, ret_payload);
+ *rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
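+
+/*
+ * Illustrative sketch, not part of the driver: a hypothetical caller could
+ * program a clock and read the rate back as shown below. The clock ID (7)
+ * and the 100 MHz target are made up for the example; the 64-bit rate is
+ * split into two 32-bit SMC arguments by the helpers above.
+ */
+static int __maybe_unused example_program_clock(void)
+{
+ u64 rate;
+ int ret;
+
+ ret = zynqmp_pm_clock_enable(7);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_clock_setrate(7, 100000000ULL);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_clock_getrate(7, &rate);
+}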
+
+/**
+ * zynqmp_pm_clock_setparent() - Set the clock parent for given id
+ * @clock_id: ID of the clock
+ * @parent_id: parent id
+ *
+ * This function is used by master to set parent for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
+ parent_id, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
+
+/**
+ * zynqmp_pm_clock_getparent() - Get the clock parent for given id
+ * @clock_id: ID of the clock
+ * @parent_id: parent id
+ *
+ * This function is used by master to get parent index
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
+ 0, 0, ret_payload);
+ *parent_id = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
+
+/**
+ * zynqmp_pm_set_pll_frac_mode() - PM API for setting the PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets the PLL mode.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
+
+/**
+ * zynqmp_pm_get_pll_frac_mode() - PM API for getting the PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
+ *
+ * This function returns the current PLL mode.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets fraction data.
+ * It is valid for fraction mode only.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
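+
+/*
+ * Illustrative sketch, not part of the driver: fraction data is only valid
+ * in fractional mode, so a hypothetical caller would pair the two IOCTLs as
+ * shown below. The PLL clock ID (0) and the data value are made up for the
+ * example; PLL_MODE_FRAC is the constant named in the kernel-doc above.
+ */
+static int __maybe_unused example_program_pll_frac(void)
+{
+ int ret;
+
+ ret = zynqmp_pm_set_pll_frac_mode(0, PLL_MODE_FRAC);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_set_pll_frac_data(0, 0x4000); /* hypothetical data */
+}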
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL;
+ u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
+
+ if (value) {
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+ }
+
+ /*
+ * Work around completely misdesigned firmware API on Xilinx ZynqMP.
+ * The IOCTL_SET_SD_TAPDELAY firmware call allows the caller to only
+ * ever set IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA
+ * bits, but there is no matching call to clear those bits. If those
+ * bits are not cleared, SDMMC tuning may fail.
+ *
+ * Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to
+ * allow complete unrestricted access to all address space, including
+ * IOU_SLCR SD_ITAPDLY Register and all the other registers, access
+ * to which was supposed to be protected by the current firmware API.
+ *
+ * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
+ * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
+ */
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id: Node ID of the device
+ * @type: Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+
+/**
+ * zynqmp_pm_ospi_mux_select() - OSPI Mux selection
+ *
+ * @dev_id: Device Id of the OSPI device.
+ * @select: OSPI Mux select value.
+ *
+ * This function selects the OSPI Mux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
+ select, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
+
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index: GGS register index
+ * @value: Register value to be written
+ *
+ * This function writes value to GGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index: GGS register index
+ * @value: Returned GGS register value
+ *
+ * This function returns GGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
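+
+/*
+ * Illustrative sketch, not part of the driver: a hypothetical caller could
+ * round-trip a value through GGS register 0 as shown below. The read helper
+ * fills a full payload array and the read-back value lands in word 1, just
+ * as the ggs_show() sysfs handler further down assumes.
+ */
+static int __maybe_unused example_ggs_roundtrip(void)
+{
+ u32 payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_write_ggs(0, 0xDEADBEEF);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_read_ggs(0, payload);
+ if (ret)
+ return ret;
+
+ return payload[1] == 0xDEADBEEF ? 0 : -EIO;
+}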
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index: PGGS register index
+ * @value: Register value to be written
+ *
+ * This function writes value to PGGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ * storage (pggs)
+ * @index: PGGS register index
+ * @value: Returned PGGS register value
+ *
+ * This function returns PGGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
+
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
+
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value: Status value to be written
+ *
+ * This function sets healthy bit value to indicate boot health status
+ * to firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
+ * @reset: Reset to be configured
+ * @assert_flag: Flag stating whether the reset should be asserted (1) or
+ * released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
+
+/**
+ * zynqmp_pm_reset_get_status - Get status of the reset
+ * @reset: Reset whose status should be returned
+ * @status: Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+ 0, 0, ret_payload);
+ *status = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
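+
+/*
+ * Illustrative sketch, not part of the driver: a hypothetical caller could
+ * drive a reset line and confirm its state as shown below. The reset ID and
+ * the action are left as parameters because their constants come from the
+ * zynqmp_pm_reset and zynqmp_pm_reset_action enums in the firmware header.
+ */
+static int __maybe_unused example_toggle_reset(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action action)
+{
+ u32 status;
+ int ret;
+
+ ret = zynqmp_pm_reset_assert(reset, action);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_reset_get_status(reset, &status);
+}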
+
+/**
+ * zynqmp_pm_fpga_load - Perform the fpga load
+ * @address: Address to write to
+ * @size: pl bitstream size
+ * @flags: Bitstream type
+ * -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to pmufw to transfer
+ * the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags,
+ ret_payload);
+ if (ret_payload[0])
+ return -ret_payload[0];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value: Returned PCAP status register value
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
+
+/**
+ * zynqmp_pm_fpga_get_config_status - Get the FPGA configuration status.
+ * @value: Buffer to store FPGA configuration status.
+ *
+ * This function provides access to the pmufw to get the FPGA configuration
+ * status
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 buf, lower_addr, upper_addr;
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ lower_addr = lower_32_bits((u64)&buf);
+ upper_addr = upper_32_bits((u64)&buf);
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_READ,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET,
+ lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG,
+ ret_payload);
+
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status);
+
+/**
+ * zynqmp_pm_pinctrl_request - Request Pin from firmware
+ * @pin: Pin number to request
+ *
+ * This function requests pin from firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
+
+/**
+ * zynqmp_pm_pinctrl_release - Inform firmware that Pin control is released
+ * @pin: Pin number to release
+ *
+ * This function releases the pin back to firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id: Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!id)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+ 0, 0, ret_payload);
+ *id = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
+ * @pin: Pin number
+ * @id: Function ID to set
+ *
+ * This function sets requested function for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to get
+ * @value: Buffer to store parameter value
+ *
+ * This function gets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+ 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_config);
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to set
+ * @value: Parameter value to set
+ *
+ * This function sets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ int ret;
+
+ if (pm_family_code == ZYNQMP_FAMILY_CODE &&
+ param == PM_PINCTRL_CONFIG_TRI_STATE) {
+ ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
+ if (ret < PM_PINCTRL_PARAM_SET_VERSION)
+ return -EOPNOTSUPP;
+ }
+
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
+ param, value, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
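+
+/*
+ * Illustrative sketch, not part of the driver: the usual pinctrl sequence is
+ * request, configure, release. The pin number (12), function ID (2) and the
+ * tri-state value (0, assumed here to mean "disable") are made up for the
+ * example.
+ */
+static int __maybe_unused example_configure_pin(void)
+{
+ int ret;
+
+ ret = zynqmp_pm_pinctrl_request(12);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_pinctrl_set_function(12, 2);
+ if (!ret)
+ ret = zynqmp_pm_pinctrl_set_config(12, PM_PINCTRL_CONFIG_TRI_STATE, 0);
+
+ zynqmp_pm_pinctrl_release(12);
+
+ return ret;
+}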
+
+/**
+ * zynqmp_pm_bootmode_read() - PM Config API for reading bootpin status
+ * @ps_mode: Returned output value of ps_mode
+ *
+ * This API function is used to notify the power management controller to
+ * read the bootpin status.
+ *
+ * Return: status, either success or error+reason
+ */
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ unsigned int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, CRL_APB_BOOT_PIN_CTRL, 0,
+ 0, 0, ret_payload);
+
+ *ps_mode = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_read);
+
+/**
+ * zynqmp_pm_bootmode_write() - PM Config API for configuring the bootpin
+ * @ps_mode: Value to be written to the bootpin ctrl register
+ *
+ * This API function is used to notify the power management controller to
+ * configure the bootpin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, CRL_APB_BOOT_PIN_CTRL,
+ CRL_APB_BOOTPIN_CTRL_MASK, ps_mode, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ * master has initialized its own power management
+ *
+ * Return: Returns status, either success or error+reason
+ *
+ * This API function is used to notify the power management controller
+ * about the completed power management initialization.
+ */
+int zynqmp_pm_init_finalize(void)
+{
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
+
+/**
+ * zynqmp_pm_set_suspend_mode() - Set system suspend mode
+ * @mode: Mode to set for system suspend
+ *
+ * This API function is used to set mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This function is used by master to request particular node from firmware.
+ * Every master must request node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+ qos, ack, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node: Node ID of the slave
+ *
+ * This function is used by master to inform firmware that master
+ * has released node. Once released, master must not use that node
+ * without re-request.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_release_node(const u32 node)
+{
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
+
+/**
+ * zynqmp_pm_get_rpu_mode() - Get RPU mode
+ * @node_id: Node ID of the device
+ * @rpu_mode: Returned RPU operating mode, either split or lockstep
+ *
+ * Return: Returns 0 on success or error+reason. On success, rpu_mode is
+ * set to the current RPU operating mode.
+ */
+int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload);
+
+ /* only set rpu_mode if no error */
+ if (ret == XST_PM_SUCCESS)
+ *rpu_mode = ret_payload[0];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode);
+
+/**
+ * zynqmp_pm_set_rpu_mode() - Set RPU mode
+ * @node_id: Node ID of the device
+ * @rpu_mode: Requested RPU operating mode, either split or lockstep
+ *
+ * This function is used to set RPU mode to split or
+ * lockstep
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode,
+ 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
+
+/**
+ * zynqmp_pm_set_tcm_config - configure TCM
+ * @node_id: Firmware specific TCM subsystem ID
+ * @tcm_mode: Requested TCM configuration,
+ * either PM_RPU_TCM_COMB or PM_RPU_TCM_SPLIT
+ *
+ * This function is used to configure the TCM banks as combined or split.
+ *
+ * Return: status: 0 for success, else failure
+ */
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
+ IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
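+
+/*
+ * Illustrative sketch, not part of the driver: before loading lockstep
+ * firmware, a remoteproc-style caller could combine the TCM banks and read
+ * back the resulting RPU mode as shown below. The node ID is left as a
+ * parameter; PM_RPU_TCM_COMB is the constant named in the kernel-doc above.
+ */
+static int __maybe_unused example_combine_tcm(u32 node_id)
+{
+ enum rpu_oper_mode mode;
+ int ret;
+
+ ret = zynqmp_pm_set_tcm_config(node_id, PM_RPU_TCM_COMB);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_get_rpu_mode(node_id, &mode);
+}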
+
+/**
+ * zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
+ * be powered down forcefully
+ * @node: Node ID of the targeted PU or subsystem
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * Return: status, either success or error+reason
+ */
+int zynqmp_pm_force_pwrdwn(const u32 node,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn);
+
+/**
+ * zynqmp_pm_request_wake - PM call to wake up selected master or subsystem
+ * @node: Node ID of the master or subsystem
+ * @set_addr: Specifies whether the address argument is relevant
+ * @address: Address from which to resume when woken up
+ * @ack: Flag to specify whether acknowledge requested
+ *
+ * Return: status, either success or error+reason
+ */
+int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ /* set_addr flag is encoded into 1st bit of address */
+ return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
+ address >> 32, ack, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake);
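+
+/*
+ * Illustrative sketch, not part of the driver: a hypothetical caller could
+ * power a remote processor node down and wake it from a resume address as
+ * shown below. The resume address (0x3ed00000) is made up for the example;
+ * node ID and acknowledge mode are left to the caller. Bit 0 of the low
+ * address word carries the set_addr flag, as described above.
+ */
+static int __maybe_unused example_cycle_remote_cpu(const u32 node,
+ const enum zynqmp_pm_request_ack ack)
+{
+ int ret;
+
+ ret = zynqmp_pm_force_pwrdwn(node, ack);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_request_wake(node, true, 0x3ed00000, ack);
+}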
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of a slave that
+ * the PU has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+ qos, ack, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
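+
+/*
+ * Illustrative sketch, not part of the driver: a master first requests a
+ * slave node and may later adjust its capabilities with set_requirement
+ * without re-requesting it. Capability and acknowledge constants are left
+ * as parameters because they live in the firmware header, not in this file;
+ * QoS is passed as 0 since it is not supported.
+ */
+static int __maybe_unused example_request_and_retune(const u32 node,
+ const u32 caps, const u32 new_caps,
+ const enum zynqmp_pm_request_ack ack)
+{
+ int ret;
+
+ ret = zynqmp_pm_request_node(node, caps, 0, ack);
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_set_requirement(node, new_caps, 0, ack);
+}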
+
+/**
+ * zynqmp_pm_load_pdi - Load and process PDI
+ * @src: Source device where PDI is located
+ * @address: PDI src address
+ *
+ * This function provides support to load a PDI from Linux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
+ lower_32_bits(address),
+ upper_32_bits(address), 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi);
+
+/**
+ * zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
+
+/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data / address of the output buffer where
+ * the hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - for initializing the csudma driver and SHA3 (here address
+ * and size inputs can be NULL).
+ * BIT(1) - to call Sha3_Update API which can be called multiple
+ * times when data is not contiguous.
+ * BIT(2) - to get final hash of the whole updated data.
+ * Hash will be overwritten at provided address with
+ * 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
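+
+/*
+ * Illustrative sketch, not part of the driver: the SHA3 flow described above
+ * is init (BIT(0)), one or more updates (BIT(1)), then final (BIT(2)). The
+ * addresses and size are left as parameters, and the 48-byte digest length
+ * on the final call is an assumption based on the kernel-doc above; a real
+ * caller must use buffers the PMU firmware can access.
+ */
+static int __maybe_unused example_sha3_digest(const u64 data_addr, const u32 size,
+ const u64 hash_addr)
+{
+ int ret;
+
+ ret = zynqmp_pm_sha_hash(0, 0, BIT(0)); /* init */
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_sha_hash(data_addr, size, BIT(1)); /* update */
+ if (ret)
+ return ret;
+
+ return zynqmp_pm_sha_hash(hash_addr, 48, BIT(2)); /* final */
+}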
+
+/**
+ * zynqmp_pm_register_notifier() - PM API for registering a subsystem
+ * to be notified about a specific
+ * event/error.
+ * @node: Node ID to which the event is related.
+ * @event: Event Mask of error events for which the subsystem wants to get notified.
+ * @wake: Wake subsystem upon capturing the event if value 1
+ * @enable: Enable the registration for value 1, disable for value 0
+ *
+ * This function is used to register/un-register for a particular node-event
+ * combination in firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+ wake, enable, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
+
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart request: 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_feature_config - PM call to request IOCTL for feature config
+ * @id: The config ID of the feature to be configured
+ * @value: The config value of the feature to be configured
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
+ id, value, NULL);
+}
+
+/**
+ * zynqmp_pm_get_feature_config - PM call to get value of configured feature
+ * @id: The config id of the feature to be queried
+ * @payload: Returned value array
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
+ id, 0, payload);
+}
+
+/**
+ * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
+ * @node: SD node ID
+ * @config: The config type of SD registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
+
+/**
+ * zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
+ * @node: GEM node ID
+ * @config: The config type of GEM registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
+};
+
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
+/**
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
+ *
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
+ */
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
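+
+/*
+ * For reference, GGS0_SHOW(0) below expands to roughly the following,
+ * wiring the generic ggs_show() helper to register index 0:
+ *
+ * ssize_t ggs0_show(struct device *device,
+ * struct device_attribute *attr,
+ * char *buf)
+ * {
+ * return ggs_show(device, attr, buf, 0);
+ * }
+ */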
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
+}
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static ssize_t feature_config_id_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%d\n", devinfo->feature_conf_id);
+}
+
+static ssize_t feature_config_id_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 config_id;
+ int ret;
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = kstrtou32(buf, 10, &config_id);
+ if (ret)
+ return ret;
+
+ devinfo->feature_conf_id = config_id;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_id);
+
+static ssize_t feature_config_value_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ ret = zynqmp_pm_get_feature_config(devinfo->feature_conf_id,
+ ret_payload);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", ret_payload[1]);
+}
+
+static ssize_t feature_config_value_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ int ret;
+ struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = kstrtou32(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_feature_config(devinfo->feature_conf_id,
+ value);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(feature_config_value);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ &dev_attr_feature_config_id.attr,
+ &dev_attr_feature_config_value.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
+
+static int zynqmp_firmware_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct zynqmp_devinfo *devinfo;
+ int ret;
+
+ ret = get_set_conduit_method(dev->of_node);
+ if (ret)
+ return ret;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (!np)
+ return 0;
+
+ feature_check_enabled = true;
+ }
+
+ if (!feature_check_enabled) {
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret >= 0)
+ feature_check_enabled = true;
+ }
+
+ of_node_put(np);
+
+ devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
+ if (!devinfo)
+ return -ENOMEM;
+
+ devinfo->dev = dev;
+
+ platform_set_drvdata(pdev, devinfo);
+
+ /* Check PM API version number */
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ if (ret)
+ return ret;
+
+ if (pm_api_version < ZYNQMP_PM_VERSION) {
+ panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
+ __func__,
+ ZYNQMP_PM_VERSION_MAJOR, ZYNQMP_PM_VERSION_MINOR,
+ pm_api_version >> 16, pm_api_version & 0xFFFF);
+ }
+
+ pr_info("%s Platform Management API v%d.%d\n", __func__,
+ pm_api_version >> 16, pm_api_version & 0xFFFF);
+
+ /* Get the Family code and sub family code of platform */
+ ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ if (ret < 0)
+ return ret;
+
+ /* Check trustzone version number */
+ ret = zynqmp_pm_get_trustzone_version(&pm_tz_version);
+ if (ret)
+ panic("Legacy trustzone found without version support\n");
+
+ if (pm_tz_version < ZYNQMP_TZ_VERSION)
+ panic("%s Trustzone version error. Expected: v%d.%d - Found: v%d.%d\n",
+ __func__,
+ ZYNQMP_TZ_VERSION_MAJOR, ZYNQMP_TZ_VERSION_MINOR,
+ pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+ pr_info("%s Trustzone version v%d.%d\n", __func__,
+ pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+ ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ return ret;
+ }
+
+ zynqmp_pm_api_debugfs_init();
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (np) {
+ em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
+ -1, NULL, 0);
+ if (IS_ERR(em_dev))
+ dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register fail with error\n");
+ }
+ of_node_put(np);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int zynqmp_firmware_remove(struct platform_device *pdev)
+{
+ struct pm_api_feature_data *feature_data;
+ struct hlist_node *tmp;
+ int i;
+
+ mfd_remove_devices(&pdev->dev);
+ zynqmp_pm_api_debugfs_exit();
+
+ hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
+ hash_del(&feature_data->hentry);
+ kfree(feature_data);
+ }
+
+ platform_device_unregister(em_dev);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_firmware_of_match[] = {
+ {.compatible = "xlnx,zynqmp-firmware"},
+ {.compatible = "xlnx,versal-firmware"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
+
+static struct platform_driver zynqmp_firmware_driver = {
+ .driver = {
+ .name = "zynqmp_firmware",
+ .of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
+ },
+ .probe = zynqmp_firmware_probe,
+ .remove = zynqmp_firmware_remove,
+};
+module_platform_driver(zynqmp_firmware_driver);