author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/firmware
parent    Initial commit. (diff)
Adding upstream version 5.10.209. (upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/Kconfig | 310
-rw-r--r--  drivers/firmware/Makefile | 35
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 11
-rw-r--r--  drivers/firmware/arm_scmi/base.c | 367
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 252
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 370
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 267
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 990
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c | 238
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 1532
-rw-r--r--  drivers/firmware/arm_scmi/notify.h | 68
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 895
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 304
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 314
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c | 147
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 370
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c | 101
-rw-r--r--  drivers/firmware/arm_scmi/smc.c | 154
-rw-r--r--  drivers/firmware/arm_scmi/system.c | 131
-rw-r--r--  drivers/firmware/arm_scpi.c | 1049
-rw-r--r--  drivers/firmware/arm_sdei.c | 1130
-rw-r--r--  drivers/firmware/broadcom/Kconfig | 32
-rw-r--r--  drivers/firmware/broadcom/Makefile | 4
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 238
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_sprom.c | 726
-rw-r--r--  drivers/firmware/broadcom/tee_bnxt_fw.c | 286
-rw-r--r--  drivers/firmware/dmi-id.c | 254
-rw-r--r--  drivers/firmware/dmi-sysfs.c | 698
-rw-r--r--  drivers/firmware/dmi_scan.c | 1203
-rw-r--r--  drivers/firmware/edd.c | 790
-rw-r--r--  drivers/firmware/efi/Kconfig | 286
-rw-r--r--  drivers/firmware/efi/Makefile | 44
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 237
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 178
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 343
-rw-r--r--  drivers/firmware/efi/capsule.c | 303
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 342
-rw-r--r--  drivers/firmware/efi/cper-x86.c | 356
-rw-r--r--  drivers/firmware/efi/cper.c | 650
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 196
-rw-r--r--  drivers/firmware/efi/earlycon.c | 246
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 88
-rw-r--r--  drivers/firmware/efi/efi-init.c | 387
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 467
-rw-r--r--  drivers/firmware/efi/efi.c | 1049
-rw-r--r--  drivers/firmware/efi/efibc.c | 107
-rw-r--r--  drivers/firmware/efi/efivars.c | 670
-rw-r--r--  drivers/firmware/efi/embedded-firmware.c | 147
-rw-r--r--  drivers/firmware/efi/esrt.c | 438
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 124
-rw-r--r--  drivers/firmware/efi/fake_mem.h | 10
-rw-r--r--  drivers/firmware/efi/fdtparams.c | 129
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 149
-rw-r--r--  drivers/firmware/efi/libstub/alignedmem.c | 57
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 170
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 168
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 704
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c | 380
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 853
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 361
-rw-r--r--  drivers/firmware/efi/libstub/file.c | 250
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 580
-rw-r--r--  drivers/firmware/efi/libstub/mem.c | 132
-rw-r--r--  drivers/firmware/efi/libstub/pci.c | 114
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 143
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 128
-rw-r--r--  drivers/firmware/efi/libstub/relocate.c | 174
-rw-r--r--  drivers/firmware/efi/libstub/riscv-stub.c | 112
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 76
-rw-r--r--  drivers/firmware/efi/libstub/skip_spaces.c | 12
-rw-r--r--  drivers/firmware/efi/libstub/string.c | 115
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 167
-rw-r--r--  drivers/firmware/efi/libstub/vsprintf.c | 564
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 817
-rw-r--r--  drivers/firmware/efi/memattr.c | 179
-rw-r--r--  drivers/firmware/efi/memmap.c | 378
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 362
-rw-r--r--  drivers/firmware/efi/rci2-table.c | 150
-rw-r--r--  drivers/firmware/efi/reboot.c | 77
-rw-r--r--  drivers/firmware/efi/riscv-runtime.c | 143
-rw-r--r--  drivers/firmware/efi/runtime-map.c | 192
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 479
-rw-r--r--  drivers/firmware/efi/test/Makefile | 2
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 766
-rw-r--r--  drivers/firmware/efi/test/efi_test.h | 121
-rw-r--r--  drivers/firmware/efi/tpm.c | 110
-rw-r--r--  drivers/firmware/efi/vars.c | 1222
-rw-r--r--  drivers/firmware/efi/x86_fake_mem.c | 75
-rw-r--r--  drivers/firmware/google/Kconfig | 77
-rw-r--r--  drivers/firmware/google/Makefile | 11
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 228
-rw-r--r--  drivers/firmware/google/coreboot_table.h | 95
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c | 93
-rw-r--r--  drivers/firmware/google/gsmi.c | 1081
-rw-r--r--  drivers/firmware/google/memconsole-coreboot.c | 112
-rw-r--r--  drivers/firmware/google/memconsole-x86-legacy.c | 157
-rw-r--r--  drivers/firmware/google/memconsole.c | 53
-rw-r--r--  drivers/firmware/google/memconsole.h | 36
-rw-r--r--  drivers/firmware/google/vpd.c | 322
-rw-r--r--  drivers/firmware/google/vpd_decode.c | 98
-rw-r--r--  drivers/firmware/google/vpd_decode.h | 50
-rw-r--r--  drivers/firmware/imx/Kconfig | 30
-rw-r--r--  drivers/firmware/imx/Makefile | 4
-rw-r--r--  drivers/firmware/imx/imx-dsp.c | 155
-rw-r--r--  drivers/firmware/imx/imx-scu-irq.c | 174
-rw-r--r--  drivers/firmware/imx/imx-scu-soc.c | 138
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 361
-rw-r--r--  drivers/firmware/imx/misc.c | 137
-rw-r--r--  drivers/firmware/imx/rm.c | 45
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 374
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 894
-rw-r--r--  drivers/firmware/iscsi_ibft_find.c | 104
-rw-r--r--  drivers/firmware/memmap.c | 418
-rw-r--r--  drivers/firmware/meson/Kconfig | 10
-rw-r--r--  drivers/firmware/meson/Makefile | 2
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 338
-rw-r--r--  drivers/firmware/pcdp.c | 135
-rw-r--r--  drivers/firmware/pcdp.h | 108
-rw-r--r--  drivers/firmware/psci/Kconfig | 14
-rw-r--r--  drivers/firmware/psci/Makefile | 4
-rw-r--r--  drivers/firmware/psci/psci.c | 605
-rw-r--r--  drivers/firmware/psci/psci_checker.c | 492
-rw-r--r--  drivers/firmware/qcom_scm-legacy.c | 242
-rw-r--r--  drivers/firmware/qcom_scm-smc.c | 153
-rw-r--r--  drivers/firmware/qcom_scm.c | 1307
-rw-r--r--  drivers/firmware/qcom_scm.h | 150
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 938
-rw-r--r--  drivers/firmware/raspberrypi.c | 389
-rw-r--r--  drivers/firmware/scpi_pm_domain.c | 156
-rw-r--r--  drivers/firmware/smccc/Kconfig | 25
-rw-r--r--  drivers/firmware/smccc/Makefile | 4
-rw-r--r--  drivers/firmware/smccc/smccc.c | 33
-rw-r--r--  drivers/firmware/smccc/soc_id.c | 114
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 601
-rw-r--r--  drivers/firmware/stratix10-svc.c | 1127
-rw-r--r--  drivers/firmware/tegra/Kconfig | 26
-rw-r--r--  drivers/firmware/tegra/Makefile | 9
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 766
-rw-r--r--  drivers/firmware/tegra/bpmp-private.h | 35
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c | 305
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra210.c | 243
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 879
-rw-r--r--  drivers/firmware/tegra/ivc.c | 687
-rw-r--r--  drivers/firmware/ti_sci.c | 3513
-rw-r--r--  drivers/firmware/ti_sci.h | 1417
-rw-r--r--  drivers/firmware/trusted_foundations.c | 184
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c | 590
-rw-r--r--  drivers/firmware/xilinx/Kconfig | 26
-rw-r--r--  drivers/firmware/xilinx/Makefile | 5
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.c | 239
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.h | 24
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 1313
152 files changed, 53000 insertions, 0 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
new file mode 100644
index 000000000..807c5320d
--- /dev/null
+++ b/drivers/firmware/Kconfig
@@ -0,0 +1,310 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.rst.
+#
+
+menu "Firmware Drivers"
+
+config ARM_SCMI_PROTOCOL
+ tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY
+ help
+ ARM System Control and Management Interface (SCMI) protocol is a
+ set of operating system-independent software interfaces that are
+ used in system management. SCMI is extensible and currently provides
+ interfaces for: Discovery and self-description of the interfaces
+ it supports, Power domain management which is the ability to place
+ a given device or domain into the various power-saving states that
+ it supports, Performance management which is the ability to control
+ the performance of a domain that is composed of compute engines
+ such as application processors and other accelerators, Clock
+ management which is the ability to set and inquire rates on platform
+ managed clocks, and Sensor management which is the ability to read
+ sensor data and to be notified of sensor value changes.
+
+ This protocol library provides an interface for all the client drivers
+ that make use of the features offered by SCMI.
+
+config ARM_SCMI_POWER_DOMAIN
+ tristate "SCMI power domain driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCMI power domains, which can be
+ enabled or disabled via the SCP firmware.
+
+ This driver can also be built as a module. If so, the module
+ will be called scmi_pm_domain. Note this may be needed early in boot,
+ before the rootfs is available.
+
+config ARM_SCPI_PROTOCOL
+ tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MAILBOX
+ help
+ System Control and Power Interface (SCPI) Message Protocol is
+ defined for the purpose of communication between the Application
+ Cores (AP) and the System Control Processor (SCP). The MHU peripheral
+ provides a mechanism for inter-processor communication between SCP
+ and AP.
+
+ SCP controls most of the power management on the Application
+ Processors. It offers control and management of: the core/cluster
+ power states, various power domain DVFS including the core/cluster,
+ certain system clocks configuration, thermal sensors and many
+ others.
+
+ This protocol library provides an interface for all the client drivers
+ that make use of the features offered by the SCP.
+
+config ARM_SCPI_POWER_DOMAIN
+ tristate "SCPI power domain driver"
+ depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCPI power domains, which can be
+ enabled or disabled via the SCP firmware.
+
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ depends on ACPI_APEI_GHES
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
+config EDD
+ tristate "BIOS Enhanced Disk Drive calls determine boot disk"
+ depends on X86
+ help
+ Say Y or M here if you want to enable BIOS Enhanced Disk Drive
+ Services real mode BIOS calls to determine which disk the
+ BIOS tries to boot from. This information is then exported via sysfs.
+
+ This option is experimental and is known to fail to boot on some
+ obscure configurations. Most disk controller BIOS vendors do
+ not yet implement this feature.
+
+config EDD_OFF
+ bool "Sets default behavior for EDD detection to off"
+ depends on EDD
+ default n
+ help
+ Say Y if you want EDD disabled by default, even though it is compiled into the
+ kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
+ using the kernel parameter 'edd={on|skipmbr|off}'.
+
+config FIRMWARE_MEMMAP
+ bool "Add firmware-provided memory map to sysfs" if EXPERT
+ default X86
+ help
+ Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
+ That memory map is used, for example, by kexec to set up the parameter
+ area for the next kernel, but can also be used for debugging purposes.
+
+ See also Documentation/ABI/testing/sysfs-firmware-memmap.
+
+config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+ default y if IA64
+ help
+ If your firmware supplies the PCDP table, and you want to
+ automatically use the primary console device it describes
+ as the Linux console, say Y here.
+
+ If your firmware supplies the HCDP table, and you want to
+ use the first serial port it describes as the Linux console,
+ say Y here. If your EFI ConOut path contains only a UART
+ device, it will become the console automatically. Otherwise,
+ you must specify the "console=hcdp" kernel boot argument.
+
+ Neither the PCDP nor the HCDP affects naming of serial devices,
+ so a serial console may be /dev/ttyS0, /dev/ttyS1, etc, depending
+ on how the driver discovers devices.
+
+ You must also enable the appropriate drivers (serial, VGA, etc.)
+
+ See DIG64_HCDPv20_042804.pdf available from
+ <http://www.dig64.org/specifications/>
+
+config DMIID
+ bool "Export DMI identification via sysfs to userspace"
+ depends on DMI
+ default y
+ help
+ Say Y here if you want to query SMBIOS/DMI system identification
+ information from userspace through /sys/class/dmi/id/ or if you want
+ DMI-based module auto-loading.
+
+config DMI_SYSFS
+ tristate "DMI table support in sysfs"
+ depends on SYSFS && DMI
+ default n
+ help
+ Say Y or M here to enable the exporting of the raw DMI table
+ data via sysfs. This is useful for consuming the data without
+ requiring any access to /dev/mem at all. Tables are found
+ under /sys/firmware/dmi when this option is enabled and
+ loaded.
+
+config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ bool
+
+config ISCSI_IBFT_FIND
+ bool "iSCSI Boot Firmware Table Attributes"
+ depends on X86 && ISCSI_IBFT
+ default n
+ help
+ This option enables the kernel to find the region of memory
+ in which the iSCSI Boot Firmware Table (iBFT) resides. This
+ is necessary for the iSCSI Boot Firmware Table Attributes module
+ to work properly.
+
+config ISCSI_IBFT
+ tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
+ default n
+ help
+ This option enables support for detecting and exposing the iSCSI
+ Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
+ detect iSCSI boot parameters dynamically during system boot, say Y.
+ Otherwise, say N.
+
+config RASPBERRYPI_FIRMWARE
+ tristate "Raspberry Pi Firmware Driver"
+ depends on BCM2835_MBOX
+ help
+ This option enables support for communicating with the firmware on the
+ Raspberry Pi.
+
+config FW_CFG_SYSFS
+ tristate "QEMU fw_cfg device support in sysfs"
+ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
+ depends on HAS_IOPORT_MAP
+ default n
+ help
+ Say Y or M here to enable the exporting of the QEMU firmware
+ configuration (fw_cfg) file entries via sysfs. Entries are
+ found under /sys/firmware/fw_cfg when this option is enabled
+ and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+ bool "QEMU fw_cfg device parameter parsing"
+ depends on FW_CFG_SYSFS
+ help
+ Allow the qemu_fw_cfg device to be initialized via the kernel
+ command line or using a module parameter.
+ WARNING: Using incorrect parameters (base address in particular)
+ may crash your system.
+
+config INTEL_STRATIX10_SERVICE
+ tristate "Intel Stratix10 Service Layer"
+ depends on (ARCH_STRATIX10 || ARCH_AGILEX) && HAVE_ARM_SMCCC
+ default n
+ help
+ Intel Stratix10 service layer runs at privileged exception level,
+ interfaces with the service providers (FPGA manager is one of them)
+ and manages secure monitor calls to communicate with secure monitor
+ software at secure monitor exception level.
+
+ Say Y here if you want Stratix10 service layer support.
+
+config INTEL_STRATIX10_RSU
+ tristate "Intel Stratix10 Remote System Update"
+ depends on INTEL_STRATIX10_SERVICE
+ help
+ The Intel Remote System Update (RSU) driver exposes interfaces,
+ accessed through the Intel Service Layer, to user space via sysfs
+ device attribute nodes. The RSU interfaces report/control some of
+ the optional RSU features of the Stratix 10 SoC FPGA.
+
+ The RSU provides a way for customers to update the boot
+ configuration of a Stratix 10 SoC device with significantly reduced
+ risk of corrupting the bitstream storage and bricking the system.
+
+ Enable RSU support if you are using an Intel SoC FPGA with the RSU
+ feature enabled and you want Linux user space control.
+
+ Say Y here if you want Intel RSU support.
+
+config QCOM_SCM
+ bool
+ depends on ARM || ARM64
+ depends on HAVE_ARM_SMCCC
+ select RESET_CONTROLLER
+
+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+ bool "Qualcomm download mode enabled by default"
+ depends on QCOM_SCM
+ help
+ A device with "download mode" enabled will upon an unexpected
+ warm-restart enter a special debug mode that allows the user to
+ "download" memory content over USB for offline postmortem analysis.
+ The feature can be enabled/disabled on the kernel command line.
+
+ Say Y here to enable "download mode" by default.
+
+config TI_SCI_PROTOCOL
+ tristate "TI System Control Interface (TISCI) Message Protocol"
+ depends on TI_MESSAGE_MANAGER
+ help
+ TI System Control Interface (TISCI) Message Protocol is used to manage
+ compute systems such as ARM, DSP, etc. with the system controller in
+ complex System on Chip (SoC) designs, such as those found on certain
+ Keystone-generation SoCs from TI.
+
+ The system controller provides various facilities, including power
+ management support.
+
+ This protocol library is used by client drivers to access the features
+ provided by the system controller.
+
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM && CPU_V7
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
+config TURRIS_MOX_RWTM
+ tristate "Turris Mox rWTM secure firmware driver"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_DMA && OF
+ depends on MAILBOX
+ select HW_RANDOM
+ select ARMADA_37XX_RWTM_MBOX
+ help
+ This driver communicates with the firmware on the Cortex-M3 secure
+ processor of the Turris Mox router. Enable if you are building for
+ Turris Mox, and you will be able to read the device serial number and
+ other manufacturing data and also utilize the Entropy Bit Generator
+ for hardware random number generation.
+
+source "drivers/firmware/broadcom/Kconfig"
+source "drivers/firmware/google/Kconfig"
+source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/imx/Kconfig"
+source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/psci/Kconfig"
+source "drivers/firmware/smccc/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
+source "drivers/firmware/xilinx/Kconfig"
+
+endmenu
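
For orientation only (not part of the patch): a minimal .config fragment turning on the SCMI stack introduced by the Kconfig above might look like the sketch below. The option names are taken from this file; which options a given platform actually needs, and that its dependencies (MAILBOX or SMCCC discovery) are already satisfied, are assumptions.

# Illustrative configuration fragment: SCMI protocol core plus its
# generic power-domain driver.
CONFIG_ARM_SCMI_PROTOCOL=y
CONFIG_ARM_SCMI_POWER_DOMAIN=y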
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
new file mode 100644
index 000000000..5e013b6a3
--- /dev/null
+++ b/drivers/firmware/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
+obj-$(CONFIG_DMI) += dmi_scan.o
+obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
+obj-$(CONFIG_EDD) += edd.o
+obj-$(CONFIG_EFI_PCDP) += pcdp.o
+obj-$(CONFIG_DMIID) += dmi-id.o
+obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
+obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o
+obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
+obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
+obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
+obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
+obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
+
+obj-y += arm_scmi/
+obj-y += broadcom/
+obj-y += meson/
+obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
+obj-$(CONFIG_EFI) += efi/
+obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += imx/
+obj-y += psci/
+obj-y += smccc/
+obj-y += tegra/
+obj-y += xilinx/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
new file mode 100644
index 000000000..bc0d54f8e
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+scmi-bus-y = bus.o
+scmi-driver-y = driver.o notify.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o
+scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
+ $(scmi-transport-y)
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
+obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
new file mode 100644
index 000000000..e51d28ba2
--- /dev/null
+++ b/drivers/firmware/arm_scmi/base.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Base Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_BASE_NUM_SOURCES 1
+#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
+
+enum scmi_base_protocol_cmd {
+ BASE_DISCOVER_VENDOR = 0x3,
+ BASE_DISCOVER_SUB_VENDOR = 0x4,
+ BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
+ BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
+ BASE_DISCOVER_AGENT = 0x7,
+ BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+struct scmi_msg_resp_base_attributes {
+ u8 num_protocols;
+ u8 num_agents;
+ __le16 reserved;
+};
+
+struct scmi_msg_base_error_notify {
+ __le32 event_control;
+#define BASE_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_base_error_notify_payld {
+ __le32 agent_id;
+ __le32 error_status;
+#define IS_FATAL_ERROR(x) ((x) & BIT(31))
+#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x))
+ __le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
+};
+
+/**
+ * scmi_base_attributes_get() - gets the implementation details
+ * that are associated with the base protocol.
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_attributes_get(const struct scmi_handle *handle)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_base_attributes *attr_info;
+ struct scmi_revision_info *rev = handle->version;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ attr_info = t->rx.buf;
+ rev->num_protocols = attr_info->num_protocols;
+ rev->num_agents = attr_info->num_agents;
+ }
+
+ scmi_xfer_put(handle, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
+ *
+ * @handle: SCMI entity handle
+ * @sub_vendor: specify true if sub-vendor ID is needed
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
+{
+ u8 cmd;
+ int ret, size;
+ char *vendor_id;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = handle->version;
+
+ if (sub_vendor) {
+ cmd = BASE_DISCOVER_SUB_VENDOR;
+ vendor_id = rev->sub_vendor_id;
+ size = ARRAY_SIZE(rev->sub_vendor_id);
+ } else {
+ cmd = BASE_DISCOVER_VENDOR;
+ vendor_id = rev->vendor_id;
+ size = ARRAY_SIZE(rev->vendor_id);
+ }
+
+ ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ memcpy(vendor_id, t->rx.buf, size);
+
+ scmi_xfer_put(handle, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_version_get() - gets a vendor-specific
+ * implementation 32-bit version. The format of the version number is
+ * vendor-specific
+ *
+ * @handle: SCMI entity handle
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int
+scmi_base_implementation_version_get(const struct scmi_handle *handle)
+{
+ int ret;
+ __le32 *impl_ver;
+ struct scmi_xfer *t;
+ struct scmi_revision_info *rev = handle->version;
+
+ ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
+ SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ impl_ver = t->rx.buf;
+ rev->impl_ver = le32_to_cpu(*impl_ver);
+ }
+
+ scmi_xfer_put(handle, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_implementation_list_get() - gets the list of protocols that
+ * OSPM is allowed to access
+ *
+ * @handle: SCMI entity handle
+ * @protocols_imp: pointer to hold the list of protocol identifiers
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
+ u8 *protocols_imp)
+{
+ u8 *list;
+ int ret, loop;
+ struct scmi_xfer *t;
+ __le32 *num_skip, *num_ret;
+ u32 tot_num_ret = 0, loop_num_ret;
+ struct device *dev = handle->dev;
+
+ ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
+ SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
+ if (ret)
+ return ret;
+
+ num_skip = t->tx.buf;
+ num_ret = t->rx.buf;
+ list = t->rx.buf + sizeof(*num_ret);
+
+ do {
+ /* Set the number of protocols to be skipped/already read */
+ *num_skip = cpu_to_le32(tot_num_ret);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ break;
+
+ loop_num_ret = le32_to_cpu(*num_ret);
+ if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) {
+ dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
+ break;
+ }
+
+ for (loop = 0; loop < loop_num_ret; loop++)
+ protocols_imp[tot_num_ret + loop] = *(list + loop);
+
+ tot_num_ret += loop_num_ret;
+
+ scmi_reset_rx_to_maxsz(handle, t);
+ } while (loop_num_ret);
+
+ scmi_xfer_put(handle, t);
+
+ return ret;
+}
+
+/**
+ * scmi_base_discover_agent_get() - discover the name of an agent
+ *
+ * @handle: SCMI entity handle
+ * @id: Agent identifier
+ * @name: Agent identifier ASCII string
+ *
+ * An agent id of 0 is reserved to identify the platform itself.
+ * Generally the operating system is represented as "OSPM".
+ *
+ * Return: 0 on success, else appropriate SCMI error.
+ */
+static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
+ int id, char *name)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT,
+ SCMI_PROTOCOL_BASE, sizeof(__le32),
+ SCMI_MAX_STR_SIZE, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(id, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+
+ scmi_xfer_put(handle, t);
+
+ return ret;
+}
+
+static int scmi_base_error_notify(const struct scmi_handle *handle, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_base_error_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, BASE_NOTIFY_ERRORS,
+ SCMI_PROTOCOL_BASE, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_base_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_base_error_notify(handle, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_base_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ int i;
+ const struct scmi_base_error_notify_payld *p = payld;
+ struct scmi_base_error_report *r = report;
+
+ /*
+ * The BaseError notification payload is variable in size, up to
+ * a maximum length determined by the struct pointed to by p.
+ * payld_sz is the effective length of this particular notification
+ * payload, so it cannot be greater than the maximum size described
+ * by that struct.
+ */
+ if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
+ r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
+ for (i = 0; i < r->cmd_count; i++)
+ r->reports[i] = le64_to_cpu(p->msg_reports[i]);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event base_events[] = {
+ {
+ .id = SCMI_EVENT_BASE_ERROR_EVENT,
+ .max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
+ .max_report_sz = sizeof(struct scmi_base_error_report) +
+ SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
+ },
+};
+
+static const struct scmi_event_ops base_event_ops = {
+ .set_notify_enabled = scmi_base_set_notify_enabled,
+ .fill_custom_report = scmi_base_fill_custom_report,
+};
+
+int scmi_base_protocol_init(struct scmi_handle *h)
+{
+ int id, ret;
+ u8 *prot_imp;
+ u32 version;
+ char name[SCMI_MAX_STR_SIZE];
+ const struct scmi_handle *handle = h;
+ struct device *dev = handle->dev;
+ struct scmi_revision_info *rev = handle->version;
+
+ ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
+ if (ret)
+ return ret;
+
+ prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
+ if (!prot_imp)
+ return -ENOMEM;
+
+ rev->major_ver = PROTOCOL_REV_MAJOR(version),
+ rev->minor_ver = PROTOCOL_REV_MINOR(version);
+
+ scmi_base_attributes_get(handle);
+ scmi_base_vendor_id_get(handle, false);
+ scmi_base_vendor_id_get(handle, true);
+ scmi_base_implementation_version_get(handle);
+ scmi_base_implementation_list_get(handle, prot_imp);
+ scmi_setup_protocol_implemented(handle, prot_imp);
+
+ dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
+ rev->major_ver, rev->minor_ver, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+ dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
+ rev->num_agents);
+
+ scmi_register_protocol_events(handle, SCMI_PROTOCOL_BASE,
+ (4 * SCMI_PROTO_QUEUE_SZ),
+ &base_event_ops, base_events,
+ ARRAY_SIZE(base_events),
+ SCMI_BASE_NUM_SOURCES);
+
+ for (id = 0; id < rev->num_agents; id++) {
+ scmi_base_discover_agent_get(handle, id, name);
+ dev_dbg(dev, "Agent %d: %s\n", id, name);
+ }
+
+ return 0;
+}
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
new file mode 100644
index 000000000..def8a84d1
--- /dev/null
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol bus layer
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "common.h"
+
+static DEFINE_IDA(scmi_bus_id);
+static DEFINE_IDR(scmi_protocols);
+static DEFINE_SPINLOCK(protocol_lock);
+
+static const struct scmi_device_id *
+scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
+{
+ const struct scmi_device_id *id = scmi_drv->id_table;
+
+ if (!id)
+ return NULL;
+
+ for (; id->protocol_id; id++)
+ if (id->protocol_id == scmi_dev->protocol_id) {
+ if (!id->name)
+ return id;
+ else if (!strcmp(id->name, scmi_dev->name))
+ return id;
+ }
+
+ return NULL;
+}
+
+static int scmi_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(drv);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id;
+
+ id = scmi_dev_match_id(scmi_dev, scmi_drv);
+ if (id)
+ return 1;
+
+ return 0;
+}
+
+static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
+{
+ scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
+
+ if (unlikely(!fn))
+ return -EINVAL;
+ return fn(handle);
+}
+
+static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+{
+ return 0;
+}
+
+static int scmi_dev_probe(struct device *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+ const struct scmi_device_id *id;
+ int ret;
+
+ id = scmi_dev_match_id(scmi_dev, scmi_drv);
+ if (!id)
+ return -ENODEV;
+
+ if (!scmi_dev->handle)
+ return -EPROBE_DEFER;
+
+ ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
+ if (ret)
+ return ret;
+
+ /* Skip protocol initialisation for additional devices */
+ idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
+ scmi_dev->protocol_id);
+
+ return scmi_drv->probe(scmi_dev);
+}
+
+static int scmi_dev_remove(struct device *dev)
+{
+ struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ if (scmi_drv->remove)
+ scmi_drv->remove(scmi_dev);
+
+ return 0;
+}
+
+static struct bus_type scmi_bus_type = {
+ .name = "scmi_protocol",
+ .match = scmi_dev_match,
+ .probe = scmi_dev_probe,
+ .remove = scmi_dev_remove,
+};
+
+int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ int retval;
+
+ if (!driver->probe)
+ return -EINVAL;
+
+ driver->driver.bus = &scmi_bus_type;
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.mod_name = mod_name;
+
+ retval = driver_register(&driver->driver);
+ if (!retval)
+ pr_debug("registered new scmi driver %s\n", driver->name);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(scmi_driver_register);
+
+void scmi_driver_unregister(struct scmi_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+
+static void scmi_device_release(struct device *dev)
+{
+ kfree(to_scmi_dev(dev));
+}
+
+struct scmi_device *
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+ const char *name)
+{
+ int id, retval;
+ struct scmi_device *scmi_dev;
+
+ scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
+ if (!scmi_dev)
+ return NULL;
+
+ scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+ if (!scmi_dev->name) {
+ kfree(scmi_dev);
+ return NULL;
+ }
+
+ id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
+ if (id < 0) {
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
+ return NULL;
+ }
+
+ scmi_dev->id = id;
+ scmi_dev->protocol_id = protocol;
+ scmi_dev->dev.parent = parent;
+ scmi_dev->dev.of_node = np;
+ scmi_dev->dev.bus = &scmi_bus_type;
+ scmi_dev->dev.release = scmi_device_release;
+ dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
+
+ retval = device_register(&scmi_dev->dev);
+ if (retval)
+ goto put_dev;
+
+ return scmi_dev;
+put_dev:
+ kfree_const(scmi_dev->name);
+ put_device(&scmi_dev->dev);
+ ida_simple_remove(&scmi_bus_id, id);
+ return NULL;
+}
+
+void scmi_device_destroy(struct scmi_device *scmi_dev)
+{
+ kfree_const(scmi_dev->name);
+ scmi_handle_put(scmi_dev->handle);
+ ida_simple_remove(&scmi_bus_id, scmi_dev->id);
+ device_unregister(&scmi_dev->dev);
+}
+
+void scmi_set_handle(struct scmi_device *scmi_dev)
+{
+ scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
+}
+
+int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
+{
+ int ret;
+
+ spin_lock(&protocol_lock);
+ ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
+ GFP_ATOMIC);
+ spin_unlock(&protocol_lock);
+ if (ret != protocol_id)
+ pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_register);
+
+void scmi_protocol_unregister(int protocol_id)
+{
+ spin_lock(&protocol_lock);
+ idr_remove(&scmi_protocols, protocol_id);
+ spin_unlock(&protocol_lock);
+}
+EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
+
+static int __scmi_devices_unregister(struct device *dev, void *data)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ scmi_device_destroy(scmi_dev);
+ return 0;
+}
+
+static void scmi_devices_unregister(void)
+{
+ bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+}
+
+int __init scmi_bus_init(void)
+{
+ int retval;
+
+ retval = bus_register(&scmi_bus_type);
+ if (retval)
+ pr_err("scmi protocol bus register failed (%d)\n", retval);
+
+ return retval;
+}
+
+void __exit scmi_bus_exit(void)
+{
+ scmi_devices_unregister();
+ bus_unregister(&scmi_bus_type);
+ ida_destroy(&scmi_bus_id);
+}
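
As a rough sketch of how a client binds to the scmi_protocol bus implemented above (illustrative only, not part of the patch): the driver name and probe body below are hypothetical, while the types and the module_scmi_driver() helper come from include/linux/scmi_protocol.h.

/* Hypothetical SCMI client driver, shown only to illustrate the bus API. */
#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int example_scmi_probe(struct scmi_device *sdev)
{
	/* sdev->handle is valid here; protocol ops hang off the handle */
	dev_info(&sdev->dev, "bound to SCMI protocol 0x%x\n", sdev->protocol_id);
	return 0;
}

static const struct scmi_device_id example_scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },	/* match on protocol id and name */
	{ },
};
MODULE_DEVICE_TABLE(scmi, example_scmi_id_table);

static struct scmi_driver example_scmi_driver = {
	.name		= "example-scmi-client",
	.probe		= example_scmi_probe,
	.id_table	= example_scmi_id_table,
};
module_scmi_driver(example_scmi_driver);
MODULE_LICENSE("GPL v2");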
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 000000000..a45678cd9
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/sort.h>
+
+#include "common.h"
+
+enum scmi_clock_protocol_cmd {
+ CLOCK_ATTRIBUTES = 0x3,
+ CLOCK_DESCRIBE_RATES = 0x4,
+ CLOCK_RATE_SET = 0x5,
+ CLOCK_RATE_GET = 0x6,
+ CLOCK_CONFIG_SET = 0x7,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+ __le16 num_clocks;
+ u8 max_async_req;
+ u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+ __le32 attributes;
+#define CLOCK_ENABLE BIT(0)
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_clock_set_config {
+ __le32 id;
+ __le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+ __le32 id;
+ __le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+ __le32 num_rates_flags;
+#define NUM_RETURNED(x) ((x) & 0xfff)
+#define RATE_DISCRETE(x) !((x) & BIT(12))
+#define NUM_REMAINING(x) ((x) >> 16)
+ struct {
+ __le32 value_low;
+ __le32 value_high;
+ } rate[0];
+#define RATE_TO_U64(X) \
+({ \
+ typeof(X) x = (X); \
+ le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
+
+struct scmi_clock_set_rate {
+ __le32 flags;
+#define CLOCK_SET_ASYNC BIT(0)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
+#define CLOCK_SET_ROUND_UP BIT(2)
+#define CLOCK_SET_ROUND_AUTO BIT(3)
+ __le32 id;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct clock_info {
+ u32 version;
+ int num_clocks;
+ int max_async_req;
+ atomic_t cur_async_req;
+ struct scmi_clock_info *clk;
+};
+
+static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
+ struct clock_info *ci)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ ci->num_clocks = le16_to_cpu(attr->num_clocks);
+ ci->max_async_req = attr->max_async_req;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+ u32 clk_id, struct scmi_clock_info *clk)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
+ sizeof(clk_id), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
+ else
+ clk->name[0] = '\0';
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ u64 *rate = NULL;
+ int ret, cnt;
+ bool rate_discrete = false;
+ u32 tot_rate_cnt = 0, rates_flag;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *t;
+ struct scmi_msg_clock_describe_rates *clk_desc;
+ struct scmi_msg_resp_clock_describe_rates *rlist;
+
+ ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
+ SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+ if (ret)
+ return ret;
+
+ clk_desc = t->tx.buf;
+ rlist = t->rx.buf;
+
+ do {
+ clk_desc->id = cpu_to_le32(clk_id);
+ /* Set the number of rates to be skipped/already read */
+ clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ goto err;
+
+ rates_flag = le32_to_cpu(rlist->num_rates_flags);
+ num_remaining = NUM_REMAINING(rates_flag);
+ rate_discrete = RATE_DISCRETE(rates_flag);
+ num_returned = NUM_RETURNED(rates_flag);
+
+ if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
+ dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
+ break;
+ }
+
+ if (!rate_discrete) {
+ clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
+ clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
+ clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
+ dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+ clk->range.min_rate, clk->range.max_rate,
+ clk->range.step_size);
+ break;
+ }
+
+ rate = &clk->list.rates[tot_rate_cnt];
+ for (cnt = 0; cnt < num_returned; cnt++, rate++) {
+ *rate = RATE_TO_U64(rlist->rate[cnt]);
+ dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+ }
+
+ tot_rate_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ if (rate_discrete && rate) {
+ clk->list.num_rates = tot_rate_cnt;
+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+ rate_cmp_func, NULL);
+ }
+
+ clk->rate_discrete = rate_discrete;
+
+err:
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
+ sizeof(__le32), sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
+ u64 rate)
+{
+ int ret;
+ u32 flags = 0;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = handle->clk_priv;
+
+ ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ if (ci->max_async_req &&
+ atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
+ cfg = t->tx.buf;
+ cfg->flags = cpu_to_le32(flags);
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+ cfg->value_high = cpu_to_le32(rate >> 32);
+
+ if (flags & CLOCK_SET_ASYNC)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ if (ci->max_async_req)
+ atomic_dec(&ci->cur_async_req);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_config *cfg;
+
+ ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->attributes = cpu_to_le32(config);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+{
+ return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+}
+
+static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+{
+ return scmi_clock_config_set(handle, clk_id, 0);
+}
+
+static int scmi_clock_count_get(const struct scmi_handle *handle)
+{
+ struct clock_info *ci = handle->clk_priv;
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+{
+ struct clock_info *ci = handle->clk_priv;
+ struct scmi_clock_info *clk = ci->clk + clk_id;
+
+ if (!clk->name[0])
+ return NULL;
+
+ return clk;
+}
+
+static const struct scmi_clk_ops clk_ops = {
+ .count_get = scmi_clock_count_get,
+ .info_get = scmi_clock_info_get,
+ .rate_get = scmi_clock_rate_get,
+ .rate_set = scmi_clock_rate_set,
+ .enable = scmi_clock_enable,
+ .disable = scmi_clock_disable,
+};
+
+static int scmi_clock_protocol_init(struct scmi_handle *handle)
+{
+ u32 version;
+ int clkid, ret;
+ struct clock_info *cinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+
+ dev_dbg(handle->dev, "Clock Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ scmi_clock_protocol_attributes_get(handle, cinfo);
+
+ cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+ sizeof(*cinfo->clk), GFP_KERNEL);
+ if (!cinfo->clk)
+ return -ENOMEM;
+
+ for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+ struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+ ret = scmi_clock_attributes_get(handle, clkid, clk);
+ if (!ret)
+ scmi_clock_describe_rates_get(handle, clkid, clk);
+ }
+
+ cinfo->version = version;
+ handle->clk_ops = &clk_ops;
+ handle->clk_priv = cinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
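
A brief usage sketch (illustrative, not part of the patch): once scmi_clock_protocol_init() has populated handle->clk_ops, a client holding an SCMI handle could enumerate clocks roughly as follows. The helper name is hypothetical and error handling is trimmed.

/* Hypothetical helper exercising the clk_ops installed above. */
static void example_dump_scmi_clocks(const struct scmi_handle *handle)
{
	int i, count = handle->clk_ops->count_get(handle);

	for (i = 0; i < count; i++) {
		const struct scmi_clock_info *info;
		u64 rate;

		info = handle->clk_ops->info_get(handle, i);
		if (!info || handle->clk_ops->rate_get(handle, i, &rate))
			continue;	/* skip clocks that failed discovery */
		dev_dbg(handle->dev, "clock %d (%s): %llu Hz\n", i, info->name, rate);
	}
}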
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
new file mode 100644
index 000000000..34b7ae798
--- /dev/null
+++ b/drivers/firmware/arm_scmi/common.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * driver common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+#ifndef _SCMI_COMMON_H
+#define _SCMI_COMMON_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
+#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
+#define MAX_PROTOCOLS_IMP 16
+#define MAX_OPPS 16
+
+enum scmi_common_cmd {
+ PROTOCOL_VERSION = 0x0,
+ PROTOCOL_ATTRIBUTES = 0x1,
+ PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+ __le16 minor_version;
+ __le16 major_version;
+};
+
+#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
+#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
+#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
+#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
+#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
+#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
+
+/**
+ * struct scmi_msg_hdr - Message(Tx/Rx) header
+ *
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ * completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+ u8 id;
+ u8 protocol_id;
+ u16 seq;
+ u32 status;
+ bool poll_completion;
+};
+
+/**
+ * pack_scmi_header() - packs and returns 32-bit header
+ *
+ * @hdr: pointer to header containing all the information on message id,
+ * protocol id and sequence id.
+ *
+ * Return: 32-bit packed message header to be sent to the platform.
+ */
+static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
+{
+ return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+ FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
+ FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
+}
+
+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
+ * struct scmi_msg - Message(Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the Buffer
+ */
+struct scmi_msg {
+ void *buf;
+ size_t len;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @transfer_id: Unique ID for debug & profiling purpose
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ * message. If request-ACK protocol is used, we can reuse the same
+ * buffer for the rx path as we use for the tx path.
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
+ */
+struct scmi_xfer {
+ int transfer_id;
+ struct scmi_msg_hdr hdr;
+ struct scmi_msg tx;
+ struct scmi_msg rx;
+ struct completion done;
+ struct completion *async_done;
+};
+
+void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer_with_response(const struct scmi_handle *h,
+ struct scmi_xfer *xfer);
+int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
+ size_t tx_size, size_t rx_size, struct scmi_xfer **p);
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
+int scmi_handle_put(const struct scmi_handle *handle);
+struct scmi_handle *scmi_handle_get(struct device *dev);
+void scmi_set_handle(struct scmi_device *scmi_dev);
+int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+ u8 *prot_imp);
+
+int scmi_base_protocol_init(struct scmi_handle *h);
+
+int __init scmi_bus_init(void);
+void __exit scmi_bus_exit(void);
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
+ int __init scmi_##func##_register(void); \
+ void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
+} \
+\
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister((id)); \
+}
+
+/* SCMI Transport */
+/**
+ * struct scmi_chan_info - Structure representing a SCMI channel information
+ *
+ * @dev: Reference to device in the SCMI hierarchy corresponding to this
+ * channel
+ * @handle: Pointer to SCMI entity handle
+ * @transport_info: Transport layer related information
+ */
+struct scmi_chan_info {
+ struct device *dev;
+ struct scmi_handle *handle;
+ void *transport_info;
+};
+
+/**
+ * struct scmi_transport_ops - Structure representing a SCMI transport ops
+ *
+ * @chan_available: Callback to check if channel is available or not
+ * @chan_setup: Callback to allocate and setup a channel
+ * @chan_free: Callback to free a channel
+ * @send_message: Callback to send a message
+ * @mark_txdone: Callback to mark tx as done
+ * @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
+ * @poll_done: Callback to poll transfer status
+ */
+struct scmi_transport_ops {
+ bool (*chan_available)(struct device *dev, int idx);
+ int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx);
+ int (*chan_free)(int id, void *p, void *data);
+ int (*send_message)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
+ void (*fetch_response)(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
+ bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
+};
+
+/**
+ * struct scmi_desc - Description of SoC integration
+ *
+ * @ops: Pointer to the transport specific ops structure
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msg: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct scmi_desc {
+ const struct scmi_transport_ops *ops;
+ int max_rx_timeout_ms;
+ int max_msg;
+ int max_msg_size;
+};
+
+extern const struct scmi_desc scmi_mailbox_desc;
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+extern const struct scmi_desc scmi_smc_desc;
+#endif
+
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
+
+/* shmem related declarations */
+struct scmi_shared_mem;
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer);
+
+#endif /* _SCMI_COMMON_H */
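
As a worked example of the header layout defined above (added for illustration, not present in the original file): a CLOCK_RATE_GET request (message id 0x6 on the clock protocol, id 0x14) with sequence token 1 packs via pack_scmi_header() as

	FIELD_PREP(MSG_ID_MASK, 0x6)              /* bits  7:0  */
	| FIELD_PREP(MSG_PROTOCOL_ID_MASK, 0x14)  /* bits 17:10 */
	| FIELD_PREP(MSG_TOKEN_ID_MASK, 1)        /* bits 27:18 */
	= 0x6 | (0x14 << 10) | (1 << 18) = 0x00045006

The platform echoes the token bits back unmodified in its response header, which is what lets the driver match an incoming message to its pending scmi_xfer.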
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
new file mode 100644
index 000000000..8d2408284
--- /dev/null
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol driver
+ *
+ * SCMI Message Protocol is used between the System Control Processor(SCP)
+ * and the Application Processors(AP). The Message Handling Unit(MHU)
+ * provides a mechanism for inter-processor communication between SCP's
+ * Cortex M3 and AP.
+ *
+ * SCP offers control and management of the core/cluster power states,
+ * various power domain DVFS including the core/cluster, certain system
+ * clocks configuration, thermal sensors and many others.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/processor.h>
+#include <linux/slab.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
+
+enum scmi_error_codes {
+ SCMI_SUCCESS = 0, /* Success */
+ SCMI_ERR_SUPPORT = -1, /* Not supported */
+ SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
+ SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
+ SCMI_ERR_ENTRY = -4, /* Not found */
+ SCMI_ERR_RANGE = -5, /* Value out of range */
+ SCMI_ERR_BUSY = -6, /* Device busy */
+ SCMI_ERR_COMMS = -7, /* Communication Error */
+ SCMI_ERR_GENERIC = -8, /* Generic Error */
+ SCMI_ERR_HARDWARE = -9, /* Hardware Error */
+ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+};
+
+/* List of all SCMI devices active in system */
+static LIST_HEAD(scmi_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static atomic_t transfer_last_id;
+
+/**
+ * struct scmi_xfers_info - Structure to manage transfer information
+ *
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct scmi_xfers_info {
+ struct scmi_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct scmi_info - Structure representing a SCMI instance
+ *
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @version: SCMI revision information containing protocol version,
+ * implementation version and (sub-)vendor identification.
+ * @handle: Instance of SCMI handle to send to clients
+ * @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols_imp: List of protocols implemented, currently maximum of
+ * MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @node: List head
+ * @users: Number of users of this instance
+ */
+struct scmi_info {
+ struct device *dev;
+ const struct scmi_desc *desc;
+ struct scmi_revision_info version;
+ struct scmi_handle handle;
+ struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
+ struct idr tx_idr;
+ struct idr rx_idr;
+ u8 *protocols_imp;
+ struct list_head node;
+ int users;
+};
+
+#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+
+static const int scmi_linux_errmap[] = {
+ /* better than a switch-case as long as the return values are contiguous */
+ 0, /* SCMI_SUCCESS */
+ -EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
+ -EINVAL, /* SCMI_ERR_PARAMS */
+ -EACCES, /* SCMI_ERR_ACCESS */
+ -ENOENT, /* SCMI_ERR_ENTRY */
+ -ERANGE, /* SCMI_ERR_RANGE */
+ -EBUSY, /* SCMI_ERR_BUSY */
+ -ECOMM, /* SCMI_ERR_COMMS */
+ -EIO, /* SCMI_ERR_GENERIC */
+ -EREMOTEIO, /* SCMI_ERR_HARDWARE */
+ -EPROTO, /* SCMI_ERR_PROTOCOL */
+};
+
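+/*
+ * Map a (negative) SCMI status code onto a Linux errno using the table above;
+ * e.g. SCMI_ERR_PARAMS (-2) becomes -EINVAL, anything out of range -EIO.
+ */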
+static inline int scmi_to_linux_errno(int errno)
+{
+ int err_idx = -errno;
+
+ if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+ return scmi_linux_errmap[err_idx];
+ return -EIO;
+}
+
+/**
+ * scmi_dump_header_dbg() - Helper to dump a message header.
+ *
+ * @dev: Device pointer corresponding to the SCMI entity
+ * @hdr: pointer to header.
+ */
+static inline void scmi_dump_header_dbg(struct device *dev,
+ struct scmi_msg_hdr *hdr)
+{
+ dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
+ hdr->id, hdr->seq, hdr->protocol_id);
+}
+
+/**
+ * scmi_xfer_get() - Allocate one message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ *
+ * Helper function which is used by various message functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCMI entity. Further, this also holds a spinlock to maintain
+ * integrity of internal data structures.
+ *
+ * Return: Pointer to the allocated xfer if all went fine, else a
+ *	corresponding ERR_PTR().
+ */
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
+{
+ u16 xfer_id;
+ struct scmi_xfer *xfer;
+ unsigned long flags, bit_pos;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msg);
+ if (bit_pos == info->desc->max_msg) {
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+ return ERR_PTR(-ENOMEM);
+ }
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ xfer_id = bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+ xfer->hdr.seq = xfer_id;
+ reinit_completion(&xfer->done);
+ xfer->transfer_id = atomic_inc_return(&transfer_last_id);
+
+ return xfer;
+}
+
+/**
+ * __scmi_xfer_put() - Release a message
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: message that was reserved by scmi_xfer_get
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
+{
+ unsigned long flags;
+
+ /*
+ * Keep the locked section as small as possible
+ * NOTE: we might escape with smp_mb and no lock here..
+ * but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
+ ktime_t ts;
+
+ ts = ktime_get_boottime();
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
+ if (IS_ERR(xfer)) {
+ dev_err(dev, "failed to get free message slot (%ld)\n",
+ PTR_ERR(xfer));
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+ xfer);
+ scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
+ xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ MSG_TYPE_NOTIFICATION);
+
+ __scmi_xfer_put(minfo, xfer);
+
+ info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u16 xfer_id, u8 msg_type)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+
+ /* Are we even expecting this? */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+ dev_err(dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ /* It was unexpected, so nobody will clear the xfer if not us */
+ __scmi_xfer_put(minfo, xfer);
+ return;
+ }
+
+ /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
+ if (msg_type == MSG_TYPE_DELAYED_RESP)
+ xfer->rx.len = info->desc->max_msg_size;
+
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+
+ info->desc->ops->fetch_response(cinfo, xfer);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ msg_type);
+
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ info->desc->ops->clear_channel(cinfo);
+ complete(xfer->async_done);
+ } else {
+ complete(&xfer->done);
+ }
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Routes one received message to the appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_NOTIFICATION:
+ scmi_handle_notification(cinfo, msg_hdr);
+ break;
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, xfer_id, msg_type);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
+}
+
+/**
+ * scmi_xfer_put() - Release a transmit message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
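+/* Upper bound for busy-waiting on a completion when polled mode is requested */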
+#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
+
+static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer, ktime_t stop)
+{
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+
+ return info->desc->ops->poll_done(cinfo, xfer) ||
+ ktime_after(ktime_get(), stop);
+}
+
+/**
+ * scmi_do_xfer() - Do one transfer
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response; on transmit error, the
+ *	corresponding error; 0 if all goes well.
+ */
+int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct device *dev = info->dev;
+ struct scmi_chan_info *cinfo;
+
+ cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
+ if (unlikely(!cinfo))
+ return -EINVAL;
+
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
+ ret = info->desc->ops->send_message(cinfo, xfer);
+ if (ret < 0) {
+ dev_dbg(dev, "Failed to send message %d\n", ret);
+ return ret;
+ }
+
+ if (xfer->hdr.poll_completion) {
+ ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
+
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
+
+ if (ktime_before(ktime_get(), stop))
+ info->desc->ops->fetch_response(cinfo, xfer);
+ else
+ ret = -ETIMEDOUT;
+ } else {
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ dev_err(dev, "timed out in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ if (!ret && xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+
+ if (info->desc->ops->mark_txdone)
+ info->desc->ops->mark_txdone(cinfo, ret);
+
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+
+ return ret;
+}
+
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->rx.len = info->desc->max_msg_size;
+}
+
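+/* A delayed response is waited for at most two seconds */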
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+/**
+ * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * response is received
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no delayed response; on transmit error, the
+ * corresponding error; 0 if all goes well.
+ */
+int scmi_do_xfer_with_response(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ DECLARE_COMPLETION_ONSTACK(async_response);
+
+ xfer->async_done = &async_response;
+
+ ret = scmi_do_xfer(handle, xfer);
+ if (!ret) {
+ if (!wait_for_completion_timeout(xfer->async_done, timeout))
+ ret = -ETIMEDOUT;
+ else if (xfer->hdr.status)
+ ret = scmi_to_linux_errno(xfer->hdr.status);
+ }
+
+ xfer->async_done = NULL;
+ return ret;
+}
+
+/**
+ * scmi_xfer_get_init() - Allocate and initialise one message for transmit
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @msg_id: Message identifier
+ * @prot_id: Protocol identifier for the message
+ * @tx_size: transmit message size
+ * @rx_size: receive message size
+ * @p: pointer to the allocated and initialised message
+ *
+ * This function allocates the message using @scmi_xfer_get and
+ * initialises the header.
+ *
+ * Return: 0 if all went fine with @p pointing to message, else
+ * corresponding error.
+ */
+int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
+ size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+{
+ int ret;
+ struct scmi_xfer *xfer;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ struct device *dev = info->dev;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_size > info->desc->max_msg_size ||
+ tx_size > info->desc->max_msg_size)
+ return -ERANGE;
+
+ xfer = scmi_xfer_get(handle, minfo);
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "failed to get free message slot(%d)\n", ret);
+ return ret;
+ }
+
+ xfer->tx.len = tx_size;
+ xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+ xfer->hdr.id = msg_id;
+ xfer->hdr.protocol_id = prot_id;
+ xfer->hdr.poll_completion = false;
+
+ *p = xfer;
+
+ return 0;
+}
+
+/**
+ * scmi_version_get() - command to get the revision of the SCMI entity
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @protocol: Protocol identifier for the message
+ * @version: Holds returned version of protocol.
+ *
+ * Updates the SCMI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
+ u32 *version)
+{
+ int ret;
+ __le32 *rev_info;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
+ sizeof(*version), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ rev_info = t->rx.buf;
+ *version = le32_to_cpu(*rev_info);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+ u8 *prot_imp)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ info->protocols_imp = prot_imp;
+}
+
+static bool
+scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
+{
+ int i;
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ if (!info->protocols_imp)
+ return false;
+
+ for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+ if (info->protocols_imp[i] == prot_id)
+ return true;
+ return false;
+}
+
+/**
+ * scmi_handle_get() - Get the SCMI handle for a device
+ *
+ * @dev: pointer to device for which we want SCMI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the SCMI protocol library.
+ * scmi_handle_put must be balanced with a successful scmi_handle_get.
+ *
+ * Return: pointer to handle if successful, NULL on error
+ */
+struct scmi_handle *scmi_handle_get(struct device *dev)
+{
+ struct list_head *p;
+ struct scmi_info *info;
+ struct scmi_handle *handle = NULL;
+
+ mutex_lock(&scmi_list_mutex);
+ list_for_each(p, &scmi_list) {
+ info = list_entry(p, struct scmi_info, node);
+ if (dev->parent == info->dev) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&scmi_list_mutex);
+
+ return handle;
+}
+
+/**
+ * scmi_handle_put() - Release the handle acquired by scmi_handle_get
+ *
+ * @handle: handle acquired by scmi_handle_get
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the SCMI protocol library.
+ * scmi_handle_put must be balanced with a successful scmi_handle_get.
+ *
+ * Return: 0 if successfully released; -EINVAL if a NULL handle was passed.
+ */
+int scmi_handle_put(const struct scmi_handle *handle)
+{
+ struct scmi_info *info;
+
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_scmi_info(handle);
+ mutex_lock(&scmi_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&scmi_list_mutex);
+
+ return 0;
+}
+
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
+{
+ int i;
+ struct scmi_xfer *xfer;
+ struct device *dev = sinfo->dev;
+ const struct scmi_desc *desc = sinfo->desc;
+
+ /* Pre-allocated messages, no more than what hdr.seq can support */
+ if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+ dev_err(dev,
+ "Invalid maximum messages %d, not in range [1 - %lu]\n",
+ desc->max_msg, MSG_TOKEN_MAX);
+ return -EINVAL;
+ }
+
+ info->xfer_block = devm_kcalloc(dev, desc->max_msg,
+ sizeof(*info->xfer_block), GFP_KERNEL);
+ if (!info->xfer_block)
+ return -ENOMEM;
+
+ info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
+ sizeof(long), GFP_KERNEL);
+ if (!info->xfer_alloc_table)
+ return -ENOMEM;
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+ xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->rx.buf)
+ return -ENOMEM;
+
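+ /* The same pre-allocated buffer backs both the Tx and Rx payloads */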
+ xfer->tx.buf = xfer->rx.buf;
+ init_completion(&xfer->done);
+ }
+
+ spin_lock_init(&info->xfer_lock);
+
+ return 0;
+}
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
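+ /* Rx transfers are only needed when an Rx (notification) channel exists */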
+ if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
+static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
+ int prot_id, bool tx)
+{
+ int ret, idx;
+ struct scmi_chan_info *cinfo;
+ struct idr *idr;
+
+ /* Transmit channel is first entry i.e. index 0 */
+ idx = tx ? 0 : 1;
+ idr = tx ? &info->tx_idr : &info->rx_idr;
+
+ /* check if already allocated, used for multiple device per protocol */
+ cinfo = idr_find(idr, prot_id);
+ if (cinfo)
+ return 0;
+
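+ /* No dedicated channel described: fall back to sharing the base protocol channel */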
+ if (!info->desc->ops->chan_available(dev, idx)) {
+ cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+ if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+ return -EINVAL;
+ goto idr_alloc;
+ }
+
+ cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ cinfo->dev = dev;
+
+ ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
+ if (ret)
+ return ret;
+
+idr_alloc:
+ ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+ if (ret != prot_id) {
+ dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+ return ret;
+ }
+
+ cinfo->handle = &info->handle;
+ return 0;
+}
+
+static inline int
+scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+ int ret = scmi_chan_setup(info, dev, prot_id, true);
+
+ if (!ret) {
+ /* Rx is optional, report only memory errors */
+ ret = scmi_chan_setup(info, dev, prot_id, false);
+ if (ret && ret != -ENOMEM)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+ int prot_id, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
+ if (!sdev) {
+ dev_err(info->dev, "failed to create %d protocol device\n",
+ prot_id);
+ return;
+ }
+
+ if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
+ dev_err(&sdev->dev, "failed to setup transport\n");
+ scmi_device_destroy(sdev);
+ return;
+ }
+
+ /* setup handle now as the transport is ready */
+ scmi_set_handle(sdev);
+}
+
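+/* Names of the scmi_devices to be created for each implemented protocol */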
+#define MAX_SCMI_DEV_PER_PROTOCOL 2
+struct scmi_prot_devnames {
+ int protocol_id;
+ char *names[MAX_SCMI_DEV_PER_PROTOCOL];
+};
+
+static struct scmi_prot_devnames devnames[] = {
+ { SCMI_PROTOCOL_POWER, { "genpd" },},
+ { SCMI_PROTOCOL_SYSTEM, { "syspower" },},
+ { SCMI_PROTOCOL_PERF, { "cpufreq" },},
+ { SCMI_PROTOCOL_CLOCK, { "clocks" },},
+ { SCMI_PROTOCOL_SENSOR, { "hwmon" },},
+ { SCMI_PROTOCOL_RESET, { "reset" },},
+};
+
+static inline void
+scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
+ int prot_id)
+{
+ int loop, cnt;
+
+ for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
+ if (devnames[loop].protocol_id != prot_id)
+ continue;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
+ const char *name = devnames[loop].names[cnt];
+
+ if (name)
+ scmi_create_protocol_device(np, info, prot_id,
+ name);
+ }
+ }
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct scmi_handle *handle;
+ const struct scmi_desc *desc;
+ struct scmi_info *info;
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ INIT_LIST_HEAD(&info->node);
+
+ platform_set_drvdata(pdev, info);
+ idr_init(&info->tx_idr);
+ idr_init(&info->rx_idr);
+
+ handle = &info->handle;
+ handle->dev = info->dev;
+ handle->version = &info->version;
+
+ ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+ if (ret)
+ return ret;
+
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ return ret;
+
+ if (scmi_notification_init(handle))
+ dev_err(dev, "SCMI Notifications NOT available.\n");
+
+ ret = scmi_base_protocol_init(handle);
+ if (ret) {
+ dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&scmi_list_mutex);
+ list_add_tail(&info->node, &scmi_list);
+ mutex_unlock(&scmi_list_mutex);
+
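+ /* Each child node's 'reg' property holds the SCMI protocol identifier */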
+ for_each_available_child_of_node(np, child) {
+ u32 prot_id;
+
+ if (of_property_read_u32(child, "reg", &prot_id))
+ continue;
+
+ if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+ dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+ if (!scmi_is_protocol_implemented(handle, prot_id)) {
+ dev_err(dev, "SCMI protocol %d not implemented\n",
+ prot_id);
+ continue;
+ }
+
+ scmi_create_protocol_devices(child, info, prot_id);
+ }
+
+ return 0;
+}
+
+void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
+{
+ idr_remove(idr, id);
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct scmi_info *info = platform_get_drvdata(pdev);
+ struct idr *idr = &info->tx_idr;
+
+ mutex_lock(&scmi_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&scmi_list_mutex);
+
+ if (ret)
+ return ret;
+
+ scmi_notification_exit(&info->handle);
+
+ /* Safe to free channels since no more users */
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ idr_destroy(&info->tx_idr);
+
+ idr = &info->rx_idr;
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ idr_destroy(&info->rx_idr);
+
+ return ret;
+}
+
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u.%u\n", info->version.major_ver,
+ info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_sub_vendor_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
+/* Each compatible listed below must have a descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+#ifdef CONFIG_MAILBOX
+ { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#endif
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
+static struct platform_driver scmi_driver = {
+ .driver = {
+ .name = "arm-scmi",
+ .suppress_bind_attrs = true,
+ .of_match_table = scmi_of_match,
+ .dev_groups = versions_groups,
+ },
+ .probe = scmi_probe,
+ .remove = scmi_remove,
+};
+
+static int __init scmi_driver_init(void)
+{
+ scmi_bus_init();
+
+ scmi_clock_register();
+ scmi_perf_register();
+ scmi_power_register();
+ scmi_reset_register();
+ scmi_sensors_register();
+ scmi_system_register();
+
+ return platform_driver_register(&scmi_driver);
+}
+subsys_initcall(scmi_driver_init);
+
+static void __exit scmi_driver_exit(void)
+{
+ scmi_bus_exit();
+
+ scmi_clock_unregister();
+ scmi_perf_unregister();
+ scmi_power_unregister();
+ scmi_reset_unregister();
+ scmi_sensors_unregister();
+ scmi_system_unregister();
+
+ platform_driver_unregister(&scmi_driver);
+}
+module_exit(scmi_driver_exit);
+
+MODULE_ALIAS("platform:arm-scmi");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
new file mode 100644
index 000000000..ad773a657
--- /dev/null
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Mailbox Transport
+ * driver.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_mailbox - Structure representing a SCMI mailbox transport
+ *
+ * @cl: Mailbox Client
+ * @chan: Transmit/Receive mailbox channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ */
+struct scmi_mailbox {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+};
+
+#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+
+static void tx_prepare(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ shmem_tx_prepare(smbox->shmem, m);
+}
+
+static void rx_callback(struct mbox_client *cl, void *m)
+{
+ struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+
+ scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+}
+
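+/* A channel is available when an "mboxes" entry exists at the given index */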
+static bool mailbox_chan_available(struct device *dev, int idx)
+{
+ return !of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", idx, NULL);
+}
+
+static int mailbox_chan_validate(struct device *cdev)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ /* SCMI Tx and Rx shared mem areas have to be distinct */
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ return ret;
+}
+
+static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ const char *desc = tx ? "Tx" : "Rx";
+ struct device *cdev = cinfo->dev;
+ struct scmi_mailbox *smbox;
+ struct device_node *shmem;
+ int ret, idx = tx ? 0 : 1;
+ struct mbox_client *cl;
+ resource_size_t size;
+ struct resource res;
+
+ ret = mailbox_chan_validate(cdev);
+ if (ret)
+ return ret;
+
+ smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ if (!smbox)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
+ return ret;
+ }
+
+ size = resource_size(&res);
+ smbox->shmem = devm_ioremap(dev, res.start, size);
+ if (!smbox->shmem) {
+ dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
+ return -EADDRNOTAVAIL;
+ }
+
+ cl = &smbox->cl;
+ cl->dev = cdev;
+ cl->tx_prepare = tx ? tx_prepare : NULL;
+ cl->rx_callback = rx_callback;
+ cl->tx_block = false;
+ cl->knows_txdone = tx;
+
+ smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
+ if (IS_ERR(smbox->chan)) {
+ ret = PTR_ERR(smbox->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cdev, "failed to request SCMI %s mailbox\n",
+ tx ? "Tx" : "Rx");
+ return ret;
+ }
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
+
+ return 0;
+}
+
+static int mailbox_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ if (smbox && !IS_ERR(smbox->chan)) {
+ mbox_free_channel(smbox->chan);
+ cinfo->transport_info = NULL;
+ smbox->chan = NULL;
+ smbox->cinfo = NULL;
+ }
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+ ret = mbox_send_message(smbox->chan, xfer);
+
+ /* mbox_send_message returns non-negative value on success, so reset */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(smbox->chan, ret);
+}
+
+static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_response(smbox->shmem, xfer);
+}
+
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_clear_channel(smbox->shmem);
+}
+
+static bool
+mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ return shmem_poll_done(smbox->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_mailbox_ops = {
+ .chan_available = mailbox_chan_available,
+ .chan_setup = mailbox_chan_setup,
+ .chan_free = mailbox_chan_free,
+ .send_message = mailbox_send_message,
+ .mark_txdone = mailbox_mark_txdone,
+ .fetch_response = mailbox_fetch_response,
+ .fetch_notification = mailbox_fetch_notification,
+ .clear_channel = mailbox_clear_channel,
+ .poll_done = mailbox_poll_done,
+};
+
+const struct scmi_desc scmi_mailbox_desc = {
+ .ops = &scmi_mailbox_ops,
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
new file mode 100644
index 000000000..66196b293
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -0,0 +1,1532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Notification support
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+/**
+ * DOC: Theory of operation
+ *
+ * SCMI Protocol specification allows the platform to signal events to
+ * interested agents via notification messages: this is an implementation
+ * of the dispatch and delivery of such notifications to the interested users
+ * inside the Linux kernel.
+ *
+ * An SCMI Notification core instance is initialized for each active platform
+ * instance identified by the means of the usual &struct scmi_handle.
+ *
+ * Each SCMI Protocol implementation, during its initialization, registers with
+ * this core its set of supported events using scmi_register_protocol_events():
+ * all the needed descriptors are stored in the &struct registered_protocols and
+ * &struct registered_events arrays.
+ *
+ * Kernel users interested in some specific event can register their callbacks
+ * providing the usual notifier_block descriptor, since this core implements
+ * events' delivery using the standard Kernel notification chains machinery.
+ *
+ * Given the number of possible events defined by SCMI and the extensibility
+ * of the SCMI Protocol itself, the underlying notification chains are created
+ * and destroyed dynamically on demand depending on the number of users
+ * effectively registered for an event, so that no support structures or chains
+ * are allocated until at least one user has registered a notifier_block for
+ * such event. Similarly, events' generation itself is enabled at the platform
+ * level only after at least one user has registered, and it is shutdown after
+ * the last user for that event has gone.
+ *
+ * All user-provided callbacks and allocated notification chains are stored in
+ * the @registered_events_handlers hashtable. Callback registration requests
+ * for events that are still to be registered are instead kept in the dedicated
+ * common hashtable @pending_events_handlers.
+ *
+ * An event is identified univocally by the tuple (proto_id, evt_id, src_id)
+ * and is served by its own dedicated notification chain; information contained
+ * in such tuples is used, in a few different ways, to generate the needed
+ * hash-keys.
+ *
+ * Here proto_id and evt_id are simply the protocol_id and message_id numbers
+ * as described in the SCMI Protocol specification, while src_id represents an
+ * optional, protocol dependent, source identifier (like domain_id, perf_id
+ * or sensor_id and so forth).
+ *
+ * Upon reception of a notification message from the platform the SCMI RX ISR
+ * passes the received message payload and some ancillary information (including
+ * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
+ * pushes the event-data itself on a protocol-dedicated kfifo queue for further
+ * deferred processing as specified in @scmi_events_dispatcher().
+ *
+ * Each protocol has its own dedicated work_struct and worker which, once kicked
+ * by the ISR, takes care to empty its own dedicated queue, delivering the
+ * queued items into the proper notification-chain: notifications processing can
+ * proceed concurrently on distinct workers only between events belonging to
+ * different protocols while delivery of events within the same protocol is
+ * still strictly sequentially ordered by time of arrival.
+ *
+ * Events' information is then extracted from the SCMI Notification messages and
+ * conveyed, converted into a custom per-event report struct, as the void *data
+ * param to the user callback provided by the registered notifier_block, so that
+ * from the user's perspective the callback is invoked as:
+ *
+ * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
+ *
+ */
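+/*
+ * A minimal, illustrative usage sketch (the authoritative notify_ops
+ * signatures live in include/linux/scmi_protocol.h):
+ *
+ *	static int user_cb(struct notifier_block *nb, unsigned long evt_id,
+ *			   void *report)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block nb = { .notifier_call = user_cb };
+ *
+ *	handle->notify_ops->register_event_notifier(handle, proto_id, evt_id,
+ *						    &src_id, &nb);
+ */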
+
+#define dev_fmt(fmt) "SCMI Notifications - " fmt
+#define pr_fmt(fmt) "SCMI Notifications - " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "notify.h"
+
+#define SCMI_MAX_PROTO 256
+
+#define PROTO_ID_MASK GENMASK(31, 24)
+#define EVT_ID_MASK GENMASK(23, 16)
+#define SRC_ID_MASK GENMASK(15, 0)
+
+/*
+ * Builds an unsigned 32bit key from the given input tuple to be used
+ * as a key in hashtables.
+ */
+#define MAKE_HASH_KEY(p, e, s) \
+ (FIELD_PREP(PROTO_ID_MASK, (p)) | \
+ FIELD_PREP(EVT_ID_MASK, (e)) | \
+ FIELD_PREP(SRC_ID_MASK, (s)))
+
+#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
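+/* e.g. MAKE_HASH_KEY(0x13, 0x1, 0x2) == 0x13010002 */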
+
+/*
+ * Assumes that the stored obj includes its own hash-key in a field named 'key':
+ * with this simplification this macro can be equally used for all the objects'
+ * types hashed by this implementation.
+ *
+ * @__ht: The hashtable name
+ * @__obj: A pointer to the object type to be retrieved from the hashtable;
+ * it will be used as a cursor while scanning the hashtable and it will
+ * possibly be left as NULL when @__k is not found
+ * @__k: The key to search for
+ */
+#define KEY_FIND(__ht, __obj, __k) \
+({ \
+ typeof(__k) k_ = __k; \
+ typeof(__obj) obj_; \
+ \
+ hash_for_each_possible((__ht), obj_, hash, k_) \
+ if (obj_->key == k_) \
+ break; \
+ __obj = obj_; \
+})
+
+#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key))
+#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key))
+#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key))
+
+/*
+ * A set of macros used to access safely @registered_protocols and
+ * @registered_events arrays; these are fixed in size and each entry is possibly
+ * populated at protocols' registration time and then only read but NEVER
+ * modified or removed.
+ */
+#define SCMI_GET_PROTO(__ni, __pid) \
+({ \
+ typeof(__ni) ni_ = __ni; \
+ struct scmi_registered_events_desc *__pd = NULL; \
+ \
+ if (ni_) \
+ __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
+ __pd; \
+})
+
+#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \
+({ \
+ typeof(__pd) pd_ = __pd; \
+ typeof(__eid) eid_ = __eid; \
+ struct scmi_registered_event *__revt = NULL; \
+ \
+ if (pd_ && eid_ < pd_->num_events) \
+ __revt = READ_ONCE(pd_->registered_events[eid_]); \
+ __revt; \
+})
+
+#define SCMI_GET_REVT(__ni, __pid, __eid) \
+({ \
+ struct scmi_registered_event *__revt; \
+ struct scmi_registered_events_desc *__pd; \
+ \
+ __pd = SCMI_GET_PROTO((__ni), (__pid)); \
+ __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \
+ __revt; \
+})
+
+/* A couple of utility macros to limit cruft when calling protocols' helpers */
+#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->set_notify_enabled(r->proto->ni->handle, \
+ (eid), (sid), (state)); \
+})
+
+#define REVT_NOTIFY_ENABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
+
+#define REVT_NOTIFY_DISABLE(revt, eid, sid) \
+ REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
+
+#define REVT_FILL_REPORT(revt, ...) \
+({ \
+ typeof(revt) r = revt; \
+ r->proto->ops->fill_custom_report(r->proto->ni->handle, \
+ __VA_ARGS__); \
+})
+
+#define SCMI_PENDING_HASH_SZ 4
+#define SCMI_REGISTERED_HASH_SZ 6
+
+struct scmi_registered_events_desc;
+
+/**
+ * struct scmi_notify_instance - Represents an instance of the notification
+ * core
+ * @gid: GroupID used for devres
+ * @handle: A reference to the platform instance
+ * @init_work: A work item to perform final initializations of pending handlers
+ * @notify_wq: A reference to the allocated Kernel cmwq
+ * @pending_mtx: A mutex to protect @pending_events_handlers
+ * @registered_protocols: A statically allocated array containing pointers to
+ * all the registered protocol-level specific information
+ * related to events' handling
+ * @pending_events_handlers: A hashtable containing all pending events'
+ * handlers descriptors
+ *
+ * Each platform instance, represented by a handle, has its own instance of
+ * the notification subsystem represented by this structure.
+ */
+struct scmi_notify_instance {
+ void *gid;
+ struct scmi_handle *handle;
+ struct work_struct init_work;
+ struct workqueue_struct *notify_wq;
+ /* lock to protect pending_events_handlers */
+ struct mutex pending_mtx;
+ struct scmi_registered_events_desc **registered_protocols;
+ DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
+};
+
+/**
+ * struct events_queue - Describes a queue and its associated worker
+ * @sz: Size in bytes of the related kfifo
+ * @kfifo: A dedicated Kernel kfifo descriptor
+ * @notify_work: A custom work item bound to this queue
+ * @wq: A reference to the associated workqueue
+ *
+ * Each protocol has its own dedicated events_queue descriptor.
+ */
+struct events_queue {
+ size_t sz;
+ struct kfifo kfifo;
+ struct work_struct notify_work;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct scmi_event_header - A utility header
+ * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
+ * to this event as soon as it entered the SCMI RX ISR
+ * @payld_sz: Effective size of the embedded message payload which follows
+ * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
+ * @payld: A reference to the embedded event payload
+ *
+ * This header is prepended to each received event message payload before
+ * queueing it on the related &struct events_queue.
+ */
+struct scmi_event_header {
+ ktime_t timestamp;
+ size_t payld_sz;
+ unsigned char evt_id;
+ unsigned char payld[];
+};
+
+struct scmi_registered_event;
+
+/**
+ * struct scmi_registered_events_desc - Protocol Specific information
+ * @id: Protocol ID
+ * @ops: Protocol specific and event-related operations
+ * @equeue: The embedded per-protocol events_queue
+ * @ni: A reference to the initialized instance descriptor
+ * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
+ * deferred worker when fetching data from the kfifo
+ * @eh_sz: Size of the pre-allocated buffer @eh
+ * @in_flight: A reference to an in flight &struct scmi_registered_event
+ * @num_events: Number of events in @registered_events
+ * @registered_events: A dynamically allocated array holding all the registered
+ * events' descriptors, whose fixed-size is determined at
+ * compile time.
+ * @registered_mtx: A mutex to protect @registered_events_handlers
+ * @registered_events_handlers: A hashtable containing all events' handlers
+ * descriptors registered for this protocol
+ *
+ * All protocols that register at least one event have their protocol-specific
+ * information stored here, together with the embedded allocated events_queue.
+ * These descriptors are stored in the @registered_protocols array at protocol
+ * registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that, once
+ * we safely grab a NON-NULL reference from the array we can keep it and use it.
+ */
+struct scmi_registered_events_desc {
+ u8 id;
+ const struct scmi_event_ops *ops;
+ struct events_queue equeue;
+ struct scmi_notify_instance *ni;
+ struct scmi_event_header *eh;
+ size_t eh_sz;
+ void *in_flight;
+ int num_events;
+ struct scmi_registered_event **registered_events;
+ /* mutex to protect registered_events_handlers */
+ struct mutex registered_mtx;
+ DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
+};
+
+/**
+ * struct scmi_registered_event - Event Specific Information
+ * @proto: A reference to the associated protocol descriptor
+ * @evt: A reference to the associated event descriptor (as provided at
+ * registration time)
+ * @report: A pre-allocated buffer used by the deferred worker to fill a
+ * customized event report
+ * @num_sources: The number of possible sources for this event as stated at
+ * events' registration time
+ * @sources: A reference to a dynamically allocated array used to refcount the
+ * events' enable requests for all the existing sources
+ * @sources_mtx: A mutex to serialize the access to @sources
+ *
+ * All registered events are represented by one of these structures that are
+ * stored in the @registered_events array at protocol registration time.
+ *
+ * Once these descriptors are successfully registered, they are NEVER again
+ * removed or modified since protocols do not unregister ever, so that once we
+ * safely grab a NON-NULL reference from the table we can keep it and use it.
+ */
+struct scmi_registered_event {
+ struct scmi_registered_events_desc *proto;
+ const struct scmi_event *evt;
+ void *report;
+ u32 num_sources;
+ refcount_t *sources;
+ /* locking to serialize the access to sources */
+ struct mutex sources_mtx;
+};
+
+/**
+ * struct scmi_event_handler - Event handler information
+ * @key: The used hashkey
+ * @users: A reference count for number of active users for this handler
+ * @r_evt: A reference to the associated registered event; when this is NULL
+ * this handler is pending, which means that identifies a set of
+ * callbacks intended to be attached to an event which is still not
+ * known nor registered by any protocol at that point in time
+ * @chain: The notification chain dedicated to this specific event tuple
+ * @hash: The hlist_node used for collision handling
+ * @enabled: A boolean which records if event's generation has been already
+ * enabled for this handler as a whole
+ *
+ * This structure collects all the information needed to process a received
+ * event identified by the tuple (proto_id, evt_id, src_id).
+ * These descriptors are stored in a per-protocol @registered_events_handlers
+ * table using as a key a value derived from that tuple.
+ */
+struct scmi_event_handler {
+ u32 key;
+ refcount_t users;
+ struct scmi_registered_event *r_evt;
+ struct blocking_notifier_head chain;
+ struct hlist_node hash;
+ bool enabled;
+};
+
+#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
+
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl);
+
+/**
+ * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The key to use to lookup the related notification chain
+ * @report: The customized event-specific report to pass down to the callbacks
+ * as their *data parameter.
+ */
+static inline void
+scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
+ u32 evt_key, void *report)
+{
+ int ret;
+ struct scmi_event_handler *hndl;
+
+ /*
+ * Here ensure the event handler cannot vanish while using it.
+ * It is legitimate, though, for a handler not to be found at all here,
+ * e.g. when it has been unregistered by the user after some events had
+ * already been queued.
+ */
+ hndl = scmi_get_active_handler(ni, evt_key);
+ if (!hndl)
+ return;
+
+ ret = blocking_notifier_call_chain(&hndl->chain,
+ KEY_XTRACT_EVT_ID(evt_key),
+ report);
+ /* Notifiers are NOT supposed to cut the chain ... */
+ WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
+
+ scmi_put_active_handler(ni, hndl);
+}
+
+/**
+ * scmi_process_event_header() - Dequeue and process an event header
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ *
+ * Read an event header from the protocol queue into the dedicated scratch
+ * buffer and looks for a matching registered event; in case an anomalously
+ * sized read is detected just flush the queue.
+ *
+ * Return:
+ * * a reference to the matching registered event when found
+ * * ERR_PTR(-EINVAL) when NO registered event could be found
+ * * NULL when the queue is empty
+ */
+static inline struct scmi_registered_event *
+scmi_process_event_header(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd)
+{
+ unsigned int outs;
+ struct scmi_registered_event *r_evt;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh,
+ sizeof(struct scmi_event_header));
+ if (!outs)
+ return NULL;
+ if (outs != sizeof(struct scmi_event_header)) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return NULL;
+ }
+
+ r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
+ if (!r_evt)
+ r_evt = ERR_PTR(-EINVAL);
+
+ return r_evt;
+}
+
+/**
+ * scmi_process_event_payload() - Dequeue and process an event payload
+ * @eq: The queue to use
+ * @pd: The protocol descriptor to use
+ * @r_evt: The registered event descriptor to use
+ *
+ * Read an event payload from the protocol queue into the dedicated scratch
+ * buffer, fills a custom report and then look for matching event handlers and
+ * call them; skip any unknown event (as marked by scmi_process_event_header())
+ * and in case an anomalously sized read is detected just flush the queue.
+ *
+ * Return: False when the queue is empty
+ */
+static inline bool
+scmi_process_event_payload(struct events_queue *eq,
+ struct scmi_registered_events_desc *pd,
+ struct scmi_registered_event *r_evt)
+{
+ u32 src_id, key;
+ unsigned int outs;
+ void *report = NULL;
+
+ outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
+ if (!outs)
+ return false;
+
+ /* Any in-flight event has now been officially processed */
+ pd->in_flight = NULL;
+
+ if (outs != pd->eh->payld_sz) {
+ dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
+ kfifo_reset_out(&eq->kfifo);
+ return false;
+ }
+
+ if (IS_ERR(r_evt)) {
+ dev_warn(pd->ni->handle->dev,
+ "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
+ pd->eh->payld, pd->eh->payld_sz,
+ r_evt->report, &src_id);
+ if (!report) {
+ dev_err(pd->ni->handle->dev,
+ "report not available - proto:%X evt:%d\n",
+ pd->id, pd->eh->evt_id);
+ return true;
+ }
+
+ /* At first search for a generic ALL src_ids handler... */
+ key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ /* ...then search for any specific src_id */
+ key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
+ scmi_lookup_and_call_event_chain(pd->ni, key, report);
+
+ return true;
+}
+
+/**
+ * scmi_events_dispatcher() - Common worker logic for all work items.
+ * @work: The work item to use, which is associated to a dedicated events_queue
+ *
+ * Logic:
+ * 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
+ * 2. generate a custom event report from the received event message
+ * 3. lookup for any registered ALL_SRC_IDs handler:
+ * - > call the related notification chain passing in the report
+ * 4. lookup for any registered specific SRC_ID handler:
+ * - > call the related notification chain passing in the report
+ *
+ * Note that:
+ * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
+ * flood of events cannot saturate other protocols' queues.
+ * * each per-protocol queue is associated to a distinct work_item, which
+ * means, in turn, that:
+ * + all protocols can process their dedicated queues concurrently
+ * (since notify_wq:max_active != 1)
+ * + anyway at most one worker instance is allowed to run on the same queue
+ * concurrently: this ensures that we can have only one concurrent
+ * reader/writer on the associated kfifo, so that we can use it lock-less
+ *
+ * Context: Process context.
+ */
+static void scmi_events_dispatcher(struct work_struct *work)
+{
+ struct events_queue *eq;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_registered_event *r_evt;
+
+ eq = container_of(work, struct events_queue, notify_work);
+ pd = container_of(eq, struct scmi_registered_events_desc, equeue);
+ /*
+ * In order to keep the queue lock-less and the number of memcopies
+ * to the bare minimum needed, the dispatcher accounts for the
+ * possibility of per-protocol in-flight events: i.e. an event whose
+ * reception could end up being split across two subsequent runs of this
+ * worker, first the header, then the payload.
+ */
+ do {
+ if (!pd->in_flight) {
+ r_evt = scmi_process_event_header(eq, pd);
+ if (!r_evt)
+ break;
+ pd->in_flight = r_evt;
+ } else {
+ r_evt = pd->in_flight;
+ }
+ } while (scmi_process_event_payload(eq, pd, r_evt));
+}
+
+/**
+ * scmi_notify() - Queues a notification for further deferred processing
+ * @handle: The handle identifying the platform instance from which the
+ * dispatched event is generated
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID (msgID)
+ * @buf: Event Message Payload (without the header)
+ * @len: Event Message Payload size
+ * @ts: RX Timestamp in nanoseconds (boottime)
+ *
+ * Context: Called in interrupt context to queue a received event for
+ * deferred processing.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_header eh;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return 0;
+ ni = handle->notify_priv;
+
+ r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
+ if (!r_evt)
+ return -EINVAL;
+
+ if (len > r_evt->evt->max_payld_sz) {
+ dev_err(handle->dev, "discard badly sized message\n");
+ return -EINVAL;
+ }
+ if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
+ dev_warn(handle->dev,
+ "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
+ proto_id, evt_id, ktime_to_ns(ts));
+ return -ENOMEM;
+ }
+
+ eh.timestamp = ts;
+ eh.evt_id = evt_id;
+ eh.payld_sz = len;
+ /*
+ * Header and payload are enqueued with two distinct kfifo_in() (so non
+ * atomic), but this situation is handled properly on the consumer side
+ * with in-flight events tracking.
+ */
+ kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
+ kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
+ /*
+ * Don't care about return value here since we just want to ensure that
+ * a work item is queued every time some items have been pushed
+ * on the kfifo:
+ * - if work was already queued it will simply fail to queue a new one
+ * since it is not needed
+ * - if work was not queued already it will be now, even in case work
+ * was in fact already running: this behavior avoids any possible race
+ * when this function pushes new items onto the kfifos after the
+ * related executing worker had already determined the kfifo to be
+ * empty and it was terminating.
+ */
+ queue_work(r_evt->proto->equeue.wq,
+ &r_evt->proto->equeue.notify_work);
+
+ return 0;
+}
+
+/**
+ * scmi_kfifo_free() - Devres action helper to free the kfifo
+ * @kfifo: The kfifo to free
+ */
+static void scmi_kfifo_free(void *kfifo)
+{
+ kfifo_free((struct kfifo *)kfifo);
+}
+
+/**
+ * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
+ * @ni: A reference to the notification instance to use
+ * @equeue: The events_queue to initialize
+ * @sz: Size of the kfifo buffer to allocate
+ *
+ * Allocate a buffer for the kfifo and initialize it.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
+ struct events_queue *equeue, size_t sz)
+{
+ int ret;
+
+ if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
+ return -ENOMEM;
+ /* Size could have been rounded up to a power of two */
+ equeue->sz = kfifo_size(&equeue->kfifo);
+
+ ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
+ &equeue->kfifo);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
+ equeue->wq = ni->notify_wq;
+
+ return ret;
+}
+
+/**
+ * scmi_allocate_registered_events_desc() - Allocate a registered events'
+ * descriptor
+ * @ni: A reference to the &struct scmi_notify_instance notification instance
+ * to use
+ * @proto_id: Protocol ID
+ * @queue_sz: Size of the associated queue to allocate
+ * @eh_sz: Size of the event header scratch area to pre-allocate
+ * @num_events: Number of events to support (size of @registered_events)
+ * @ops: Pointer to a struct holding references to protocol specific helpers
+ * needed during events handling
+ *
+ * It is supposed to be called only once for each protocol at protocol
+ * initialization time, so it warns if the requested protocol is found already
+ * registered.
+ *
+ * Return: The allocated and registered descriptor on Success
+ */
+static struct scmi_registered_events_desc *
+scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
+ u8 proto_id, size_t queue_sz, size_t eh_sz,
+ int num_events,
+ const struct scmi_event_ops *ops)
+{
+ int ret;
+ struct scmi_registered_events_desc *pd;
+
+ /* Ensure protocols are up to date */
+ smp_rmb();
+ if (WARN_ON(ni->registered_protocols[proto_id]))
+ return ERR_PTR(-EINVAL);
+
+ pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+ pd->id = proto_id;
+ pd->ops = ops;
+ pd->ni = ni;
+
+ ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
+ if (!pd->eh)
+ return ERR_PTR(-ENOMEM);
+ pd->eh_sz = eh_sz;
+
+ pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
+ sizeof(char *), GFP_KERNEL);
+ if (!pd->registered_events)
+ return ERR_PTR(-ENOMEM);
+ pd->num_events = num_events;
+
+ /* Initialize per protocol handlers table */
+ mutex_init(&pd->registered_mtx);
+ hash_init(pd->registered_events_handlers);
+
+ return pd;
+}
+
+/**
+ * scmi_register_protocol_events() - Register Protocol Events with the core
+ * @handle: The handle identifying the platform instance against which the
+ *	    protocol's events are registered
+ * @proto_id: Protocol ID
+ * @queue_sz: Size in bytes of the associated queue to be allocated
+ * @ops: Protocol specific event-related operations
+ * @evt: Event descriptor array
+ * @num_events: Number of events in @evt array
+ * @num_sources: Number of possible sources for this protocol on this
+ * platform.
+ *
+ * Used by SCMI Protocols initialization code to register with the notification
+ * core the list of supported events and their descriptors: takes care to
+ * pre-allocate and store all needed descriptors, scratch buffers and event
+ * queues.
+ *
+ * Return: 0 on Success
+ */
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources)
+{
+ int i;
+ size_t payld_sz = 0;
+ struct scmi_registered_events_desc *pd;
+ struct scmi_notify_instance *ni;
+
+ if (!ops || !evt)
+ return -EINVAL;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENOMEM;
+ ni = handle->notify_priv;
+
+ /* Attach to the notification main devres group */
+ if (!devres_open_group(ni->handle->dev, ni->gid, GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < num_events; i++)
+ payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
+ payld_sz += sizeof(struct scmi_event_header);
+
+ pd = scmi_allocate_registered_events_desc(ni, proto_id, queue_sz,
+ payld_sz, num_events, ops);
+ if (IS_ERR(pd))
+ goto err;
+
+ for (i = 0; i < num_events; i++, evt++) {
+ struct scmi_registered_event *r_evt;
+
+ r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
+ GFP_KERNEL);
+ if (!r_evt)
+ goto err;
+ r_evt->proto = pd;
+ r_evt->evt = evt;
+
+ r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
+ sizeof(refcount_t), GFP_KERNEL);
+ if (!r_evt->sources)
+ goto err;
+ r_evt->num_sources = num_sources;
+ mutex_init(&r_evt->sources_mtx);
+
+ r_evt->report = devm_kzalloc(ni->handle->dev,
+ evt->max_report_sz, GFP_KERNEL);
+ if (!r_evt->report)
+ goto err;
+
+ pd->registered_events[i] = r_evt;
+ /* Ensure events are updated */
+ smp_wmb();
+ dev_dbg(handle->dev, "registered event - %lX\n",
+ MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
+ }
+
+ /* Register protocol and events...it will never be removed */
+ ni->registered_protocols[proto_id] = pd;
+ /* Ensure protocols are updated */
+ smp_wmb();
+
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ /*
+	 * Finalize any pending event handlers which could have been waiting
+ * for this protocol's events registration.
+ */
+ schedule_work(&ni->init_work);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Proto:%X - Registration Failed !\n", proto_id);
+ /* A failing protocol registration does not trigger full failure */
+ devres_close_group(ni->handle->dev, ni->gid);
+
+ return -ENOMEM;
+}
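+
+/*
+ * A minimal registration sketch, assuming a protocol describing one
+ * hypothetical event (dummy_events/dummy_event_ops are illustrative names);
+ * the protocol init paths below (e.g. perf.c, power.c) follow this pattern:
+ *
+ *	static const struct scmi_event dummy_events[] = {
+ *		{ .id = 0x0, .max_payld_sz = 8, .max_report_sz = 16, },
+ *	};
+ *
+ *	scmi_register_protocol_events(handle, proto_id, SCMI_PROTO_QUEUE_SZ,
+ *				      &dummy_event_ops, dummy_events,
+ *				      ARRAY_SIZE(dummy_events), num_sources);
+ */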
+
+/**
+ * scmi_allocate_event_handler() - Allocate Event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: 32bit key uniquely bound to the event identified by the tuple
+ * (proto_id, evt_id, src_id)
+ *
+ * Allocate an event handler and related notification chain associated with
+ * the provided event handler key.
+ * Note that, at this point, a related registered_event is still to be
+ * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
+ * is initialized as pending.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ * Return: the freshly allocated structure on Success
+ */
+static struct scmi_event_handler *
+scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_event_handler *hndl;
+
+ hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
+ if (!hndl)
+ return NULL;
+ hndl->key = evt_key;
+ BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
+ refcount_set(&hndl->users, 1);
+ /* New handlers are created pending */
+ hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
+
+ return hndl;
+}
+
+/**
+ * scmi_free_event_handler() - Free the provided Event handler
+ * @hndl: The event handler structure to free
+ *
+ * Context: Assumes to be called with proper locking acquired depending
+ * on the situation.
+ */
+static void scmi_free_event_handler(struct scmi_event_handler *hndl)
+{
+ hash_del(&hndl->hash);
+ kfree(hndl);
+}
+
+/**
+ * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to bind
+ *
+ * If an associated registered event is found, move the handler from the
+ * pending table into the registered one.
+ *
+ * Context: Assumes to be called with @pending_mtx already acquired.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
+ KEY_XTRACT_EVT_ID(hndl->key));
+ if (!r_evt)
+ return -EINVAL;
+
+ /* Remove from pending and insert into registered */
+ hash_del(&hndl->hash);
+ hndl->r_evt = r_evt;
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hash_add(r_evt->proto->registered_events_handlers,
+ &hndl->hash, hndl->key);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+
+ return 0;
+}
+
+/**
+ * scmi_valid_pending_handler() - Helper to check pending status of handlers
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to check
+ *
+ * A handler is considered pending when its r_evt == NULL, because the related
+ * event was still unknown at handler's registration time; anyway, since all
+ * protocols register their supported events once and for all at protocols'
+ * initialization time, a pending handler cannot be considered valid anymore if
+ * the underlying event (which it is waiting for) belongs to an already
+ * initialized and registered protocol.
+ *
+ * Return: 0 on Success
+ */
+static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_events_desc *pd;
+
+ if (!IS_HNDL_PENDING(hndl))
+ return -EINVAL;
+
+ pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
+ if (pd)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * scmi_register_event_handler() - Register whenever possible an Event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to register
+ *
+ * First try to bind the event handler to its associated event, then check
+ * whether it is at least a valid pending handler: if it is neither bound nor
+ * a valid pending handler, return an error.
+ *
+ * Valid pending incomplete bindings will be periodically retried by a dedicated
+ * worker which is kicked each time a new protocol completes its own
+ * registration phase.
+ *
+ * Context: Assumes to be called with @pending_mtx acquired.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_event_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
+ hndl->key);
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (!ret)
+ dev_dbg(ni->handle->dev,
+ "registered PENDING handler - key:%X\n",
+ hndl->key);
+ }
+
+ return ret;
+}
+
+/**
+ * __scmi_event_handler_get_ops() - Utility to get or create an event handler
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ * @create: A boolean flag to specify if a handler must be created when
+ *	    one does not already exist
+ *
+ * Search for the desired handler matching the key in both the per-protocol
+ * registered table and the common pending table:
+ * * if found, adjust the users refcount
+ * * if not found and @create is true, create and register the new handler:
+ *   the handler could end up being registered as pending if no matching event
+ *   could be found.
+ *
+ * A handler is guaranteed to reside in one and only one of the tables at
+ * any one time; to ensure this the whole search and create is performed
+ * holding the @pending_mtx lock, with @registered_mtx additionally acquired
+ * if needed.
+ *
+ * Note that when a nested acquisition of these mutexes is needed the locking
+ * order is always (same as in @init_work):
+ * 1. pending_mtx
+ * 2. registered_mtx
+ *
+ * Events generation is NOT enabled right after creation within this routine
+ * since at creation time we usually want to have everything set up and ready
+ * before events really start flowing.
+ *
+ * Return: A properly refcounted handler on Success, NULL on Failure
+ */
+static inline struct scmi_event_handler *
+__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
+ u32 evt_key, bool create)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+
+ mutex_lock(&ni->pending_mtx);
+ /* Search registered events at first ... if possible at all */
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ /* ...then amongst pending. */
+ if (!hndl) {
+ hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ }
+
+ /* Create if still not found and required */
+ if (!hndl && create) {
+ hndl = scmi_allocate_event_handler(ni, evt_key);
+ if (hndl && scmi_register_event_handler(ni, hndl)) {
+ dev_dbg(ni->handle->dev,
+ "purging UNKNOWN handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ hndl = NULL;
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+
+ return hndl;
+}
+
+static struct scmi_event_handler *
+scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, false);
+}
+
+static struct scmi_event_handler *
+scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ return __scmi_event_handler_get_ops(ni, evt_key, true);
+}
+
+/**
+ * scmi_get_active_handler() - Helper to get active handlers only
+ * @ni: A reference to the notification instance to use
+ * @evt_key: The event key to use
+ *
+ * Search for the desired handler matching the key only in the per-protocol
+ * table of registered handlers: this is called only from the dispatching path
+ * so we want to be as quick as possible and do not care about pending handlers.
+ *
+ * Return: A properly refcounted active handler
+ */
+static struct scmi_event_handler *
+scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
+{
+ struct scmi_registered_event *r_evt;
+ struct scmi_event_handler *hndl = NULL;
+
+ r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
+ KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt) {
+ mutex_lock(&r_evt->proto->registered_mtx);
+ hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
+ hndl, evt_key);
+ if (hndl)
+ refcount_inc(&hndl->users);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ }
+
+ return hndl;
+}
+
+/**
+ * __scmi_enable_evt() - Enable/disable events generation
+ * @r_evt: The registered event to act upon
+ * @src_id: The src_id to act upon
+ * @enable: The action to perform: true->Enable, false->Disable
+ *
+ * Takes care of proper refcounting while performing enable/disable: handles
+ * the special case of ALL sources requests by itself.
+ * Returns successfully if at least one of the required src_ids has been
+ * successfully enabled/disabled.
+ *
+ * Return: 0 on Success
+ */
+static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
+ u32 src_id, bool enable)
+{
+ int retvals = 0;
+ u32 num_sources;
+ refcount_t *sid;
+
+ if (src_id == SRC_ID_MASK) {
+ src_id = 0;
+ num_sources = r_evt->num_sources;
+ } else if (src_id < r_evt->num_sources) {
+ num_sources = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&r_evt->sources_mtx);
+ if (enable) {
+ for (; num_sources; src_id++, num_sources--) {
+ int ret = 0;
+
+ sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == 0) {
+ ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
+ src_id);
+ if (!ret)
+ refcount_set(sid, 1);
+ } else {
+ refcount_inc(sid);
+ }
+ retvals += !ret;
+ }
+ } else {
+ for (; num_sources; src_id++, num_sources--) {
+ sid = &r_evt->sources[src_id];
+ if (refcount_dec_and_test(sid))
+ REVT_NOTIFY_DISABLE(r_evt,
+ r_evt->evt->id, src_id);
+ }
+ retvals = 1;
+ }
+ mutex_unlock(&r_evt->sources_mtx);
+
+ return retvals ? 0 : -EINVAL;
+}
+
+static int scmi_enable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (!hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), true);
+ if (!ret)
+ hndl->enabled = true;
+ }
+
+ return ret;
+}
+
+static int scmi_disable_events(struct scmi_event_handler *hndl)
+{
+ int ret = 0;
+
+ if (hndl->enabled) {
+ ret = __scmi_enable_evt(hndl->r_evt,
+ KEY_XTRACT_SRC_ID(hndl->key), false);
+ if (!ret)
+ hndl->enabled = false;
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_put_handler_unlocked() - Put an event handler
+ * @ni: A reference to the notification instance to use
+ * @hndl: The event handler to act upon
+ *
+ * After having got exclusive access to the registered handlers hashtable,
+ * update the refcount and if @hndl is no longer in use by anyone:
+ * * ask for events' generation disabling
+ * * unregister and free the handler itself
+ *
+ * Context: Assumes all the proper locking has been managed by the caller.
+ */
+static void scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ if (refcount_dec_and_test(&hndl->users)) {
+ if (!IS_HNDL_PENDING(hndl))
+ scmi_disable_events(hndl);
+ scmi_free_event_handler(hndl);
+ }
+}
+
+static void scmi_put_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&ni->pending_mtx);
+ if (r_evt)
+ mutex_lock(&r_evt->proto->registered_mtx);
+
+ scmi_put_handler_unlocked(ni, hndl);
+
+ if (r_evt)
+ mutex_unlock(&r_evt->proto->registered_mtx);
+ mutex_unlock(&ni->pending_mtx);
+}
+
+static void scmi_put_active_handler(struct scmi_notify_instance *ni,
+ struct scmi_event_handler *hndl)
+{
+ struct scmi_registered_event *r_evt = hndl->r_evt;
+
+ mutex_lock(&r_evt->proto->registered_mtx);
+ scmi_put_handler_unlocked(ni, hndl);
+ mutex_unlock(&r_evt->proto->registered_mtx);
+}
+
+/**
+ * scmi_event_handler_enable_events() - Enable events associated with a handler
+ * @hndl: The Event handler to act upon
+ *
+ * Return: 0 on Success
+ */
+static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
+{
+ if (scmi_enable_events(hndl)) {
+ pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * scmi_register_notifier() - Register a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is registered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID, when NULL register for events coming from ALL possible
+ * sources
+ * @nb: A standard notifier block to register for the specified event
+ *
+ * Generic helper to register a notifier_block against a protocol event.
+ *
+ * A notifier_block @nb will be registered for each distinct event identified
+ * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
+ * so that:
+ *
+ * (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
+ *
+ * @src_id meaning is protocol specific and identifies the origin of the event
+ * (like domain_id, sensor_id and so forth).
+ *
+ * @src_id can be NULL to signify that the caller is interested in receiving
+ * notifications from ALL the available sources for that protocol OR simply that
+ * the protocol does not support distinct sources.
+ *
+ * As soon as one user for the specified tuple appears, a handler is created,
+ * and that specific event's generation is enabled at the platform level, unless
+ * an associated registered event is found missing, meaning that the needed
+ * protocol is still to be initialized and the handler has just been registered
+ * as still pending.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_register_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ int ret = 0;
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_or_create_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ blocking_notifier_chain_register(&hndl->chain, nb);
+
+ /* Enable events for not pending handlers */
+ if (!IS_HNDL_PENDING(hndl)) {
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret)
+ scmi_put_handler(ni, hndl);
+ }
+
+ return ret;
+}
+
+/**
+ * scmi_unregister_notifier() - Unregister a notifier_block for an event
+ * @handle: The handle identifying the platform instance against which the
+ * callback is unregistered
+ * @proto_id: Protocol ID
+ * @evt_id: Event ID
+ * @src_id: Source ID
+ * @nb: The notifier_block to unregister
+ *
+ * Takes care to unregister the provided @nb from the notification chain
+ * associated with the specified event and, if there are no more users for the
+ * event handler, also frees the associated event handler structures
+ * (this could possibly cause disabling of event's generation at platform level).
+ *
+ * Return: 0 on Success
+ */
+static int scmi_unregister_notifier(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb)
+{
+ u32 evt_key;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return -ENODEV;
+ ni = handle->notify_priv;
+
+ evt_key = MAKE_HASH_KEY(proto_id, evt_id,
+ src_id ? *src_id : SRC_ID_MASK);
+ hndl = scmi_get_handler(ni, evt_key);
+ if (!hndl)
+ return -EINVAL;
+
+ /*
+ * Note that this chain unregistration call is safe on its own
+ * being internally protected by an rwsem.
+ */
+ blocking_notifier_chain_unregister(&hndl->chain, nb);
+ scmi_put_handler(ni, hndl);
+
+ /*
+ * This balances the initial get issued in @scmi_register_notifier.
+ * If this notifier_block happened to be the last known user callback
+ * for this event, the handler is here freed and the event's generation
+ * stopped.
+ *
+ * Note that an ongoing concurrent lookup on the delivery workqueue
+ * path could still hold the refcount at 1 even after this routine
+ * completes: in such a case it will be the final put on the delivery
+ * path which will finally free this unused handler.
+ */
+ scmi_put_handler(ni, hndl);
+
+ return 0;
+}
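+
+/*
+ * A minimal sketch of how an SCMI driver could use these helpers through
+ * handle->notify_ops; the callback and notifier_block names are illustrative
+ * and the event report is assumed to be delivered as the notifier @data by
+ * the dispatching path:
+ *
+ *	static int dummy_perf_level_cb(struct notifier_block *nb,
+ *				       unsigned long event, void *data)
+ *	{
+ *		struct scmi_perf_level_report *r = data;
+ *
+ *		pr_info("PERF level changed on domain:%u\n", r->domain_id);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block dummy_nb = {
+ *		.notifier_call = dummy_perf_level_cb,
+ *	};
+ *
+ *	u32 src_id = 0;
+ *
+ *	handle->notify_ops->register_event_notifier(handle, SCMI_PROTOCOL_PERF,
+ *			SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED, &src_id,
+ *			&dummy_nb);
+ */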
+
+/**
+ * scmi_protocols_late_init() - Worker for late initialization
+ * @work: The work item to use associated to the proper SCMI instance
+ *
+ * This kicks in whenever a new protocol has completed its own registration via
+ * scmi_register_protocol_events(): it is in charge of scanning the table of
+ * pending handlers (registered by users while the related protocol was still
+ * not initialized) and finalizing their initialization whenever possible;
+ * invalid pending handlers are purged at this point in time.
+ */
+static void scmi_protocols_late_init(struct work_struct *work)
+{
+ int bkt;
+ struct scmi_event_handler *hndl;
+ struct scmi_notify_instance *ni;
+ struct hlist_node *tmp;
+
+ ni = container_of(work, struct scmi_notify_instance, init_work);
+
+ /* Ensure protocols and events are up to date */
+ smp_rmb();
+
+ mutex_lock(&ni->pending_mtx);
+ hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
+ int ret;
+
+ ret = scmi_bind_event_handler(ni, hndl);
+ if (!ret) {
+ dev_dbg(ni->handle->dev,
+ "finalized PENDING handler - key:%X\n",
+ hndl->key);
+ ret = scmi_event_handler_enable_events(hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging INVALID handler - key:%X\n",
+ hndl->key);
+ scmi_put_active_handler(ni, hndl);
+ }
+ } else {
+ ret = scmi_valid_pending_handler(ni, hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
+ }
+ }
+ mutex_unlock(&ni->pending_mtx);
+}
+
+/*
+ * notify_ops are attached to the handle so that they can be accessed
+ * directly from an scmi_driver to register its own notifiers.
+ */
+static const struct scmi_notify_ops notify_ops = {
+ .register_event_notifier = scmi_register_notifier,
+ .unregister_event_notifier = scmi_unregister_notifier,
+};
+
+/**
+ * scmi_notification_init() - Initializes Notification Core Support
+ * @handle: The handle identifying the platform instance to initialize
+ *
+ * This function lays out all the basic resources needed by the notification
+ * core instance identified by the provided handle: once done, all of the
+ * SCMI Protocols can register their events with the core during their own
+ * initializations.
+ *
+ * Note that failing to initialize the core notifications support does not
+ * cause the whole SCMI Protocols stack to fail its initialization.
+ *
+ * SCMI Notification Initialization happens in 2 steps:
+ * * initialization: basic common allocations (this function)
+ * * registration: protocols asynchronously come into life and register their
+ * own supported list of events with the core; this causes
+ * further per-protocol allocations
+ *
+ * Any user's callback registration attempt, referring to a still unregistered
+ * event, will be registered as pending and finalized later (if possible)
+ * by the scmi_protocols_late_init() work.
+ * This allows for lazy initialization of SCMI Protocols due to late (or
+ * missing) loading of SCMI drivers' modules.
+ *
+ * Return: 0 on Success
+ */
+int scmi_notification_init(struct scmi_handle *handle)
+{
+ void *gid;
+ struct scmi_notify_instance *ni;
+
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
+ if (!gid)
+ return -ENOMEM;
+
+ ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
+ if (!ni)
+ goto err;
+
+ ni->gid = gid;
+ ni->handle = handle;
+
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
+ ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 0);
+ if (!ni->notify_wq)
+ goto err;
+
+ mutex_init(&ni->pending_mtx);
+ hash_init(ni->pending_events_handlers);
+
+ INIT_WORK(&ni->init_work, scmi_protocols_late_init);
+
+ handle->notify_ops = &notify_ops;
+ handle->notify_priv = ni;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ dev_info(handle->dev, "Core Enabled.\n");
+
+ devres_close_group(handle->dev, ni->gid);
+
+ return 0;
+
+err:
+ dev_warn(handle->dev, "Initialization Failed.\n");
+ devres_release_group(handle->dev, NULL);
+ return -ENOMEM;
+}
+
+/**
+ * scmi_notification_exit() - Shutdown and clean Notification core
+ * @handle: The handle identifying the platform instance to shutdown
+ */
+void scmi_notification_exit(struct scmi_handle *handle)
+{
+ struct scmi_notify_instance *ni;
+
+ /* Ensure notify_priv is updated */
+ smp_rmb();
+ if (!handle->notify_priv)
+ return;
+ ni = handle->notify_priv;
+
+ handle->notify_priv = NULL;
+ /* Ensure handle is up to date */
+ smp_wmb();
+
+ /* Destroy while letting pending work complete */
+ destroy_workqueue(ni->notify_wq);
+
+ devres_release_group(ni->handle->dev, ni->gid);
+}
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
new file mode 100644
index 000000000..3485f20fa
--- /dev/null
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * notification header file containing some definitions, structures
+ * and function prototypes related to SCMI Notification handling.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef _SCMI_NOTIFY_H
+#define _SCMI_NOTIFY_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define SCMI_PROTO_QUEUE_SZ 4096
+
+/**
+ * struct scmi_event - Describes an event to be supported
+ * @id: Event ID
+ * @max_payld_sz: Max possible size for the payload of a notification message
+ * @max_report_sz: Max possible size for the report of a notification message
+ *
+ * Each SCMI protocol, during its initialization phase, can describe the events
+ * it wishes to support in an array of struct scmi_event entries and pass
+ * them to the core using scmi_register_protocol_events().
+ */
+struct scmi_event {
+ u8 id;
+ size_t max_payld_sz;
+ size_t max_report_sz;
+};
+
+/**
+ * struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
+ * using the proper custom protocol commands.
+ * Return 0 on Success
+ * @fill_custom_report: Fills a custom event report from the provided
+ *			event message payload, identifying the event
+ *			specific src_id.
+ *			Return NULL on failure, otherwise the fully
+ *			populated @report
+ *
+ * Context: Helpers described in &struct scmi_event_ops are called only in
+ * process context.
+ */
+struct scmi_event_ops {
+ int (*set_notify_enabled)(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enabled);
+ void *(*fill_custom_report)(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id);
+};
+
+int scmi_notification_init(struct scmi_handle *handle);
+void scmi_notification_exit(struct scmi_handle *handle);
+
+int scmi_register_protocol_events(const struct scmi_handle *handle,
+ u8 proto_id, size_t queue_sz,
+ const struct scmi_event_ops *ops,
+ const struct scmi_event *evt, int num_events,
+ int num_sources);
+int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
+ const void *buf, size_t len, ktime_t ts);
+
+#endif /* _SCMI_NOTIFY_H */
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
new file mode 100644
index 000000000..82fb3babf
--- /dev/null
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Performance Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
+
+#include <linux/bits.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/scmi_protocol.h>
+#include <linux/sort.h>
+
+#include "common.h"
+#include "notify.h"
+
+enum scmi_performance_protocol_cmd {
+ PERF_DOMAIN_ATTRIBUTES = 0x3,
+ PERF_DESCRIBE_LEVELS = 0x4,
+ PERF_LIMITS_SET = 0x5,
+ PERF_LIMITS_GET = 0x6,
+ PERF_LEVEL_SET = 0x7,
+ PERF_LEVEL_GET = 0x8,
+ PERF_NOTIFY_LIMITS = 0x9,
+ PERF_NOTIFY_LEVEL = 0xa,
+ PERF_DESCRIBE_FASTCHANNEL = 0xb,
+};
+
+struct scmi_opp {
+ u32 perf;
+ u32 power;
+ u32 trans_latency_us;
+};
+
+struct scmi_msg_resp_perf_attributes {
+ __le16 num_domains;
+ __le16 flags;
+#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
+ __le32 stats_addr_low;
+ __le32 stats_addr_high;
+ __le32 stats_size;
+};
+
+struct scmi_msg_resp_perf_domain_attributes {
+ __le32 flags;
+#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
+#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
+#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
+#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
+ __le32 rate_limit_us;
+ __le32 sustained_freq_khz;
+ __le32 sustained_perf_level;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_perf_describe_levels {
+ __le32 domain;
+ __le32 level_index;
+};
+
+struct scmi_perf_set_limits {
+ __le32 domain;
+ __le32 max_level;
+ __le32 min_level;
+};
+
+struct scmi_perf_get_limits {
+ __le32 max_level;
+ __le32 min_level;
+};
+
+struct scmi_perf_set_level {
+ __le32 domain;
+ __le32 level;
+};
+
+struct scmi_perf_notify_level_or_limits {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_perf_limits_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 range_max;
+ __le32 range_min;
+};
+
+struct scmi_perf_level_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 performance_level;
+};
+
+struct scmi_msg_resp_perf_describe_levels {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct {
+ __le32 perf_val;
+ __le32 power;
+ __le16 transition_latency_us;
+ __le16 reserved;
+ } opp[];
+};
+
+struct scmi_perf_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_perf_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *level_set_addr;
+ void __iomem *limit_set_addr;
+ void __iomem *level_get_addr;
+ void __iomem *limit_get_addr;
+ struct scmi_fc_db_info *level_set_db;
+ struct scmi_fc_db_info *limit_set_db;
+};
+
+struct perf_dom_info {
+ bool set_limits;
+ bool set_perf;
+ bool perf_limit_notify;
+ bool perf_level_notify;
+ bool perf_fastchannels;
+ u32 opp_count;
+ u32 sustained_freq_khz;
+ u32 sustained_perf_level;
+ u32 mult_factor;
+ char name[SCMI_MAX_STR_SIZE];
+ struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
+};
+
+struct scmi_perf_info {
+ u32 version;
+ int num_domains;
+ bool power_scale_mw;
+ u64 stats_addr;
+ u32 stats_size;
+ struct perf_dom_info *dom_info;
+};
+
+static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
+ PERF_NOTIFY_LIMITS,
+ PERF_NOTIFY_LEVEL,
+};
+
+static int scmi_perf_attributes_get(const struct scmi_handle *handle,
+ struct scmi_perf_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_perf_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u16 flags = le16_to_cpu(attr->flags);
+
+ pi->num_domains = le16_to_cpu(attr->num_domains);
+ pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+ pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+ (u64)le32_to_cpu(attr->stats_addr_high) << 32;
+ pi->stats_size = le32_to_cpu(attr->stats_size);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+ struct perf_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_perf_domain_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
+ SCMI_PROTOCOL_PERF, sizeof(domain),
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u32 flags = le32_to_cpu(attr->flags);
+
+ dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
+ dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
+ dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
+ dom_info->sustained_freq_khz =
+ le32_to_cpu(attr->sustained_freq_khz);
+ dom_info->sustained_perf_level =
+ le32_to_cpu(attr->sustained_perf_level);
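+		/*
+		 * Worked example with illustrative values: a domain reporting
+		 * sustained_freq_khz = 2000000 (2 GHz) at
+		 * sustained_perf_level = 4000 gets
+		 * mult_factor = (2000000 * 1000) / 4000 = 500000, i.e. 500 kHz
+		 * of frequency per performance level unit.
+		 */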
+ if (!dom_info->sustained_freq_khz ||
+ !dom_info->sustained_perf_level)
+ /* CPUFreq converts to kHz, hence default 1000 */
+ dom_info->mult_factor = 1000;
+ else
+ dom_info->mult_factor =
+ (dom_info->sustained_freq_khz * 1000) /
+ dom_info->sustained_perf_level;
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+ const struct scmi_opp *t1 = opp1, *t2 = opp2;
+
+ return t1->perf - t2->perf;
+}
+
+static int
+scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+ struct perf_dom_info *perf_dom)
+{
+ int ret, cnt;
+ u32 tot_opp_cnt = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *t;
+ struct scmi_opp *opp;
+ struct scmi_msg_perf_describe_levels *dom_info;
+ struct scmi_msg_resp_perf_describe_levels *level_info;
+
+ ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
+ SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
+ if (ret)
+ return ret;
+
+ dom_info = t->tx.buf;
+ level_info = t->rx.buf;
+
+ do {
+ dom_info->domain = cpu_to_le32(domain);
+ /* Set the number of OPPs to be skipped/already read */
+ dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ break;
+
+ num_returned = le16_to_cpu(level_info->num_returned);
+ num_remaining = le16_to_cpu(level_info->num_remaining);
+ if (tot_opp_cnt + num_returned > MAX_OPPS) {
+ dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
+ break;
+ }
+
+ opp = &perf_dom->opp[tot_opp_cnt];
+ for (cnt = 0; cnt < num_returned; cnt++, opp++) {
+ opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
+ opp->power = le32_to_cpu(level_info->opp[cnt].power);
+ opp->trans_latency_us = le16_to_cpu
+ (level_info->opp[cnt].transition_latency_us);
+
+ dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
+ opp->perf, opp->power, opp->trans_latency_us);
+ }
+
+ tot_opp_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ perf_dom->opp_count = tot_opp_cnt;
+ scmi_xfer_put(handle, t);
+
+ sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
+ return ret;
+}
+
+#define SCMI_PERF_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
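+/*
+ * Ring the fastchannel doorbell: the bits selected by db->mask are preserved
+ * from the current register value and the db->set bits are OR-ed in, using
+ * an access of the width advertised by the platform.
+ */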
+static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PERF_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PERF_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PERF_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PERF_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set | val, db->addr);
+ }
+#endif
+}
+
+static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_set_limits *limits;
+
+ ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
+ sizeof(*limits), 0, &t);
+ if (ret)
+ return ret;
+
+ limits = t->tx.buf;
+ limits->domain = cpu_to_le32(domain);
+ limits->max_level = cpu_to_le32(max_perf);
+ limits->min_level = cpu_to_le32(min_perf);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_set_addr) {
+ iowrite32(max_perf, dom->fc_info->limit_set_addr);
+ iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
+ scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_get_limits *limits;
+
+ ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ limits = t->rx.buf;
+
+ *max_perf = le32_to_cpu(limits->max_level);
+ *min_perf = le32_to_cpu(limits->min_level);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_get_addr) {
+ *max_perf = ioread32(dom->fc_info->limit_get_addr);
+ *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_set_level *lvl;
+
+ ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
+ sizeof(*lvl), 0, &t);
+ if (ret)
+ return ret;
+
+ t->hdr.poll_completion = poll;
+ lvl = t->tx.buf;
+ lvl->domain = cpu_to_le32(domain);
+ lvl->level = cpu_to_le32(level);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_set_addr) {
+ iowrite32(level, dom->fc_info->level_set_addr);
+ scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_set(handle, domain, level, poll);
+}
+
+static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
+ sizeof(u32), sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ t->hdr.poll_completion = poll;
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *level = get_unaligned_le32(t->rx.buf);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_get_addr) {
+ *level = ioread32(dom->fc_info->level_get_addr);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_get(handle, domain, level, poll);
+}
+
+static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
+ u32 domain, int message_id,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_perf_notify_level_or_limits *notify;
+
+ ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
+{
+ if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
+ return true;
+ if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
+ return true;
+ return false;
+}
+
+static void
+scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+ u32 message_id, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db;
+ struct scmi_perf_get_fc_info *info;
+ struct scmi_msg_resp_perf_desc_fc *resp;
+
+ if (!p_addr)
+ return;
+
+ ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
+ SCMI_PROTOCOL_PERF,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ return;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (!scmi_perf_fc_size_is_valid(message_id, size))
+ goto err_xfer;
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ goto err_xfer;
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+ *p_db = db;
+ }
+err_xfer:
+ scmi_xfer_put(handle, t);
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+ &fc->level_set_addr, &fc->level_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+ &fc->level_get_addr, NULL);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+ &fc->limit_set_addr, &fc->limit_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+ &fc->limit_get_addr, NULL);
+ *p_fc = fc;
+}
+
+/* Device specific ops */
+static int scmi_dev_domain_id(struct device *dev)
+{
+ struct of_phandle_args clkspec;
+
+ if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+ 0, &clkspec))
+ return -EINVAL;
+
+ return clkspec.args[0];
+}
+
+static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
+ struct device *dev)
+{
+ int idx, ret, domain;
+ unsigned long freq;
+ struct scmi_opp *opp;
+ struct perf_dom_info *dom;
+ struct scmi_perf_info *pi = handle->perf_priv;
+
+ domain = scmi_dev_domain_id(dev);
+ if (domain < 0)
+ return domain;
+
+ dom = pi->dom_info + domain;
+
+ for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ freq = opp->perf * dom->mult_factor;
+
+ ret = dev_pm_opp_add(dev, freq, 0);
+ if (ret) {
+ dev_warn(dev, "failed to add opp %luHz\n", freq);
+
+ while (idx-- > 0) {
+ freq = (--opp)->perf * dom->mult_factor;
+ dev_pm_opp_remove(dev, freq);
+ }
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
+ struct device *dev)
+{
+ struct perf_dom_info *dom;
+ struct scmi_perf_info *pi = handle->perf_priv;
+ int domain = scmi_dev_domain_id(dev);
+
+ if (domain < 0)
+ return domain;
+
+ dom = pi->dom_info + domain;
+	/* us to ns */
+ return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
+}
+
+static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
+ unsigned long freq, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
+ poll);
+}
+
+static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
+ unsigned long *freq, bool poll)
+{
+ int ret;
+ u32 level;
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ ret = scmi_perf_level_get(handle, domain, &level, poll);
+ if (!ret)
+ *freq = level * dom->mult_factor;
+
+ return ret;
+}
+
+static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
+ unsigned long *freq, unsigned long *power)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom;
+ unsigned long opp_freq;
+ int idx, ret = -EINVAL;
+ struct scmi_opp *opp;
+
+ dom = pi->dom_info + domain;
+ if (!dom)
+ return -EIO;
+
+ for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
+ opp_freq = opp->perf * dom->mult_factor;
+ if (opp_freq < *freq)
+ continue;
+
+ *freq = opp_freq;
+ *power = opp->power;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
+ struct device *dev)
+{
+ struct perf_dom_info *dom;
+ struct scmi_perf_info *pi = handle->perf_priv;
+
+ dom = pi->dom_info + scmi_dev_domain_id(dev);
+
+ return dom->fc_info && dom->fc_info->level_set_addr;
+}
+
+static const struct scmi_perf_ops perf_ops = {
+ .limits_set = scmi_perf_limits_set,
+ .limits_get = scmi_perf_limits_get,
+ .level_set = scmi_perf_level_set,
+ .level_get = scmi_perf_level_get,
+ .device_domain_id = scmi_dev_domain_id,
+ .transition_latency_get = scmi_dvfs_transition_latency_get,
+ .device_opps_add = scmi_dvfs_device_opps_add,
+ .freq_set = scmi_dvfs_freq_set,
+ .freq_get = scmi_dvfs_freq_get,
+ .est_power_get = scmi_dvfs_est_power_get,
+ .fast_switch_possible = scmi_fast_switch_possible,
+};
+
+static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
+ {
+ const struct scmi_perf_limits_notify_payld *p = payld;
+ struct scmi_perf_limits_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->range_max = le32_to_cpu(p->range_max);
+ r->range_min = le32_to_cpu(p->range_min);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
+ {
+ const struct scmi_perf_level_notify_payld *p = payld;
+ struct scmi_perf_level_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->performance_level = le32_to_cpu(p->performance_level);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static const struct scmi_event perf_events[] = {
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_limits_report),
+ },
+ {
+ .id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
+ .max_report_sz = sizeof(struct scmi_perf_level_report),
+ },
+};
+
+static const struct scmi_event_ops perf_event_ops = {
+ .set_notify_enabled = scmi_perf_set_notify_enabled,
+ .fill_custom_report = scmi_perf_fill_custom_report,
+};
+
+static int scmi_perf_protocol_init(struct scmi_handle *handle)
+{
+ int domain;
+ u32 version;
+ struct scmi_perf_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
+
+ dev_dbg(handle->dev, "Performance Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_perf_attributes_get(handle, pinfo);
+
+ pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct perf_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_perf_domain_attributes_get(handle, domain, dom);
+ scmi_perf_describe_levels_get(handle, domain, dom);
+
+ if (dom->perf_fastchannels)
+ scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
+ }
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
+ &perf_event_ops, perf_events,
+ ARRAY_SIZE(perf_events),
+ pinfo->num_domains);
+
+ pinfo->version = version;
+ handle->perf_ops = &perf_ops;
+ handle->perf_priv = pinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
new file mode 100644
index 000000000..1f37258e9
--- /dev/null
+++ b/drivers/firmware/arm_scmi/power.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Power Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+enum scmi_power_protocol_cmd {
+ POWER_DOMAIN_ATTRIBUTES = 0x3,
+ POWER_STATE_SET = 0x4,
+ POWER_STATE_GET = 0x5,
+ POWER_STATE_NOTIFY = 0x6,
+};
+
+struct scmi_msg_resp_power_attributes {
+ __le16 num_domains;
+ __le16 reserved;
+ __le32 stats_addr_low;
+ __le32 stats_addr_high;
+ __le32 stats_size;
+};
+
+struct scmi_msg_resp_power_domain_attributes {
+ __le32 flags;
+#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
+#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
+#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_set_state {
+ __le32 flags;
+#define STATE_SET_ASYNC BIT(0)
+ __le32 domain;
+ __le32 state;
+};
+
+struct scmi_power_state_notify {
+ __le32 domain;
+ __le32 notify_enable;
+};
+
+struct scmi_power_state_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 power_state;
+};
+
+struct power_dom_info {
+ bool state_set_sync;
+ bool state_set_async;
+ bool state_set_notify;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_power_info {
+ u32 version;
+ int num_domains;
+ u64 stats_addr;
+ u32 stats_size;
+ struct power_dom_info *dom_info;
+};
+
+static int scmi_power_attributes_get(const struct scmi_handle *handle,
+ struct scmi_power_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_power_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ pi->num_domains = le16_to_cpu(attr->num_domains);
+ pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
+ (u64)le32_to_cpu(attr->stats_addr_high) << 32;
+ pi->stats_size = le32_to_cpu(attr->stats_size);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+ struct power_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_power_domain_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES,
+ SCMI_PROTOCOL_POWER, sizeof(domain),
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u32 flags = le32_to_cpu(attr->flags);
+
+ dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+ dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
+ dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_set_state *st;
+
+ ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
+ sizeof(*st), 0, &t);
+ if (ret)
+ return ret;
+
+ st = t->tx.buf;
+ st->flags = cpu_to_le32(0);
+ st->domain = cpu_to_le32(domain);
+ st->state = cpu_to_le32(state);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
+ sizeof(u32), sizeof(u32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *state = get_unaligned_le32(t->rx.buf);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_power_num_domains_get(const struct scmi_handle *handle)
+{
+ struct scmi_power_info *pi = handle->power_priv;
+
+ return pi->num_domains;
+}
+
+static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_power_info *pi = handle->power_priv;
+ struct power_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static const struct scmi_power_ops power_ops = {
+ .num_domains_get = scmi_power_num_domains_get,
+ .name_get = scmi_power_name_get,
+ .state_set = scmi_power_state_set,
+ .state_get = scmi_power_state_get,
+};
+
+static int scmi_power_request_notify(const struct scmi_handle *handle,
+ u32 domain, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_power_state_notify *notify;
+
+ ret = scmi_xfer_get_init(handle, POWER_STATE_NOTIFY,
+ SCMI_PROTOCOL_POWER, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_power_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_power_request_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_power_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_power_state_notify_payld *p = payld;
+ struct scmi_power_state_changed_report *r = report;
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_state = le32_to_cpu(p->power_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event power_events[] = {
+ {
+ .id = SCMI_EVENT_POWER_STATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_power_state_changed_report),
+ },
+};
+
+static const struct scmi_event_ops power_event_ops = {
+ .set_notify_enabled = scmi_power_set_notify_enabled,
+ .fill_custom_report = scmi_power_fill_custom_report,
+};
+
+static int scmi_power_protocol_init(struct scmi_handle *handle)
+{
+ int domain;
+ u32 version;
+ struct scmi_power_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
+
+ dev_dbg(handle->dev, "Power Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_power_attributes_get(handle, pinfo);
+
+ pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct power_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_power_domain_attributes_get(handle, domain, dom);
+ }
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_POWER, SCMI_PROTO_QUEUE_SZ,
+ &power_event_ops, power_events,
+ ARRAY_SIZE(power_events),
+ pinfo->num_domains);
+
+ pinfo->version = version;
+ handle->power_ops = &power_ops;
+ handle->power_priv = pinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_POWER, power)
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 000000000..a981a22cf
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+enum scmi_reset_protocol_cmd {
+ RESET_DOMAIN_ATTRIBUTES = 0x3,
+ RESET = 0x4,
+ RESET_NOTIFY = 0x5,
+};
+
+#define NUM_RESET_DOMAIN_MASK 0xffff
+#define RESET_NOTIFY_ENABLE BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+ __le32 latency;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+ __le32 domain_id;
+ __le32 flags;
+#define AUTONOMOUS_RESET BIT(0)
+#define EXPLICIT_RESET_ASSERT BIT(1)
+#define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+#define ARCH_COLD_RESET 0
+};
+
+struct scmi_msg_reset_notify {
+ __le32 id;
+ __le32 event_control;
+#define RESET_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_reset_issued_notify_payld {
+ __le32 agent_id;
+ __le32 domain_id;
+ __le32 reset_state;
+};
+
+struct reset_dom_info {
+ bool async_reset;
+ bool reset_notify;
+ u32 latency_us;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+ u32 version;
+ int num_domains;
+ struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+ struct scmi_reset_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ u32 attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ attr = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+ struct reset_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_reset_domain_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, sizeof(domain),
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u32 attributes = le32_to_cpu(attr->attributes);
+
+ dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+ dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ dom_info->latency_us = le32_to_cpu(attr->latency);
+ if (dom_info->latency_us == U32_MAX)
+ dom_info->latency_us = 0;
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+
+ return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+ u32 flags, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *rdom = pi->dom_info + domain;
+
+ if (rdom->async_reset)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
+ sizeof(*dom), 0, &t);
+ if (ret)
+ return ret;
+
+ dom = t->tx.buf;
+ dom->domain_id = cpu_to_le32(domain);
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+ if (rdom->async_reset)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+}
+
+static const struct scmi_reset_ops reset_ops = {
+ .num_domains_get = scmi_reset_num_domains_get,
+ .name_get = scmi_reset_name_get,
+ .latency_get = scmi_reset_latency_get,
+ .reset = scmi_reset_domain_reset,
+ .assert = scmi_reset_domain_assert,
+ .deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_notify(const struct scmi_handle *handle, u32 domain_id,
+ bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, RESET_NOTIFY,
+ SCMI_PROTOCOL_RESET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(domain_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_reset_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_reset_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_reset_issued_notify_payld *p = payld;
+ struct scmi_reset_issued_report *r = report;
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->reset_state = le32_to_cpu(p->reset_state);
+ *src_id = r->domain_id;
+
+ return r;
+}
+
+static const struct scmi_event reset_events[] = {
+ {
+ .id = SCMI_EVENT_RESET_ISSUED,
+ .max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
+ .max_report_sz = sizeof(struct scmi_reset_issued_report),
+ },
+};
+
+static const struct scmi_event_ops reset_event_ops = {
+ .set_notify_enabled = scmi_reset_set_notify_enabled,
+ .fill_custom_report = scmi_reset_fill_custom_report,
+};
+
+static int scmi_reset_protocol_init(struct scmi_handle *handle)
+{
+ int domain;
+ u32 version;
+ struct scmi_reset_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+
+ dev_dbg(handle->dev, "Reset Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_reset_attributes_get(handle, pinfo);
+
+ pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_reset_domain_attributes_get(handle, domain, dom);
+ }
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_RESET, SCMI_PROTO_QUEUE_SZ,
+ &reset_event_ops, reset_events,
+ ARRAY_SIZE(reset_events),
+ pinfo->num_domains);
+
+ pinfo->version = version;
+ handle->reset_ops = &reset_ops;
+ handle->reset_priv = pinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_RESET, reset)
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
new file mode 100644
index 000000000..af74e521f
--- /dev/null
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic power domain support.
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/scmi_protocol.h>
+
+struct scmi_pm_domain {
+ struct generic_pm_domain genpd;
+ const struct scmi_handle *handle;
+ const char *name;
+ u32 domain;
+};
+
+#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
+
+static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+ int ret;
+ u32 state, ret_state;
+ struct scmi_pm_domain *pd = to_scmi_pd(domain);
+ const struct scmi_power_ops *ops = pd->handle->power_ops;
+
+ if (power_on)
+ state = SCMI_POWER_STATE_GENERIC_ON;
+ else
+ state = SCMI_POWER_STATE_GENERIC_OFF;
+
+ ret = ops->state_set(pd->handle, pd->domain, state);
+ if (!ret)
+ ret = ops->state_get(pd->handle, pd->domain, &ret_state);
+ if (!ret && state != ret_state)
+ return -EIO;
+
+ return ret;
+}
+
+static int scmi_pd_power_on(struct generic_pm_domain *domain)
+{
+ return scmi_pd_power(domain, true);
+}
+
+static int scmi_pd_power_off(struct generic_pm_domain *domain)
+{
+ return scmi_pd_power(domain, false);
+}
+
+static int scmi_pm_domain_probe(struct scmi_device *sdev)
+{
+ int num_domains, i;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scmi_pm_domain *scmi_pd;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct generic_pm_domain **domains;
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle || !handle->power_ops)
+ return -ENODEV;
+
+ num_domains = handle->power_ops->num_domains_get(handle);
+ if (num_domains < 0) {
+ dev_err(dev, "number of domains not found\n");
+ return num_domains;
+ }
+
+ scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
+ if (!scmi_pd)
+ return -ENOMEM;
+
+ scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
+ if (!scmi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scmi_pd++) {
+ u32 state;
+
+ if (handle->power_ops->state_get(handle, i, &state)) {
+ dev_warn(dev, "failed to get state for domain %d\n", i);
+ continue;
+ }
+
+ scmi_pd->domain = i;
+ scmi_pd->handle = handle;
+ scmi_pd->name = handle->power_ops->name_get(handle, i);
+ scmi_pd->genpd.name = scmi_pd->name;
+ scmi_pd->genpd.power_off = scmi_pd_power_off;
+ scmi_pd->genpd.power_on = scmi_pd_power_on;
+
+ pm_genpd_init(&scmi_pd->genpd, NULL,
+ state == SCMI_POWER_STATE_GENERIC_OFF);
+
+ domains[i] = &scmi_pd->genpd;
+ }
+
+ scmi_pd_data->domains = domains;
+ scmi_pd_data->num_domains = num_domains;
+
+ dev_set_drvdata(dev, scmi_pd_data);
+
+ return of_genpd_add_provider_onecell(np, scmi_pd_data);
+}
+
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_POWER, "genpd" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_power_domain_driver = {
+ .name = "scmi-power-domain",
+ .probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
new file mode 100644
index 000000000..b4232d611
--- /dev/null
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Sensor Protocol
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+enum scmi_sensor_protocol_cmd {
+ SENSOR_DESCRIPTION_GET = 0x3,
+ SENSOR_TRIP_POINT_NOTIFY = 0x4,
+ SENSOR_TRIP_POINT_CONFIG = 0x5,
+ SENSOR_READING_GET = 0x6,
+};
+
+struct scmi_msg_resp_sensor_attributes {
+ __le16 num_sensors;
+ u8 max_requests;
+ u8 reserved;
+ __le32 reg_addr_low;
+ __le32 reg_addr_high;
+ __le32 reg_size;
+};
+
+struct scmi_msg_resp_sensor_description {
+ __le16 num_returned;
+ __le16 num_remaining;
+ struct {
+ __le32 id;
+ __le32 attributes_low;
+#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
+#define NUM_TRIP_POINTS(x) ((x) & 0xff)
+ __le32 attributes_high;
+#define SENSOR_TYPE(x) ((x) & 0xff)
+#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
+#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
+#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
+ u8 name[SCMI_MAX_STR_SIZE];
+ } desc[0];
+};
+
+struct scmi_msg_sensor_trip_point_notify {
+ __le32 id;
+ __le32 event_control;
+#define SENSOR_TP_NOTIFY_ALL BIT(0)
+};
+
+struct scmi_msg_set_sensor_trip_point {
+ __le32 id;
+ __le32 event_control;
+#define SENSOR_TP_EVENT_MASK (0x3)
+#define SENSOR_TP_DISABLED 0x0
+#define SENSOR_TP_POSITIVE 0x1
+#define SENSOR_TP_NEGATIVE 0x2
+#define SENSOR_TP_BOTH 0x3
+#define SENSOR_TP_ID(x) (((x) & 0xff) << 4)
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_msg_sensor_reading_get {
+ __le32 id;
+ __le32 flags;
+#define SENSOR_READ_ASYNC BIT(0)
+};
+
+struct scmi_sensor_trip_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ __le32 trip_point_desc;
+};
+
+struct sensors_info {
+ u32 version;
+ int num_sensors;
+ int max_requests;
+ u64 reg_addr;
+ u32 reg_size;
+ struct scmi_sensor_info *sensors;
+};
+
+static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
+ struct sensors_info *si)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_sensor_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ si->num_sensors = le16_to_cpu(attr->num_sensors);
+ si->max_requests = attr->max_requests;
+ si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
+ (u64)le32_to_cpu(attr->reg_addr_high) << 32;
+ si->reg_size = le32_to_cpu(attr->reg_size);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_description_get(const struct scmi_handle *handle,
+ struct sensors_info *si)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_sensor_description *buf;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ buf = t->rx.buf;
+
+ do {
+ /* Set the number of sensors to be skipped/already read */
+ put_unaligned_le32(desc_index, t->tx.buf);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ break;
+
+ num_returned = le16_to_cpu(buf->num_returned);
+ num_remaining = le16_to_cpu(buf->num_remaining);
+
+ if (desc_index + num_returned > si->num_sensors) {
+			dev_err(handle->dev, "No. of sensors can't exceed %d\n",
+				si->num_sensors);
+ break;
+ }
+
+ for (cnt = 0; cnt < num_returned; cnt++) {
+ u32 attrh, attrl;
+ struct scmi_sensor_info *s;
+
+ attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
+ attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
+ s = &si->sensors[desc_index + cnt];
+ s->id = le32_to_cpu(buf->desc[cnt].id);
+ s->type = SENSOR_TYPE(attrh);
+ s->scale = SENSOR_SCALE(attrh);
+ /* Sign extend to a full s8 */
+ if (s->scale & SENSOR_SCALE_SIGN)
+ s->scale |= SENSOR_SCALE_EXTEND;
+ s->async = SUPPORTS_ASYNC_READ(attrl);
+ s->num_trip_points = NUM_TRIP_POINTS(attrl);
+ strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+ }
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
+		/*
+		 * Check both returned and remaining to avoid an infinite
+		 * loop due to buggy firmware.
+		 */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ int ret;
+ u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_trip_point_notify *cfg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
+ SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(sensor_id);
+ cfg->event_control = cpu_to_le32(evt_cntl);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
+ u8 trip_id, u64 trip_value)
+{
+ int ret;
+ u32 evt_cntl = SENSOR_TP_BOTH;
+ struct scmi_xfer *t;
+ struct scmi_msg_set_sensor_trip_point *trip;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
+ SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
+ if (ret)
+ return ret;
+
+ trip = t->tx.buf;
+ trip->id = cpu_to_le32(sensor_id);
+ trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
+ trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
+ trip->value_high = cpu_to_le32(trip_value >> 32);
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_reading_get(const struct scmi_handle *handle,
+ u32 sensor_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
+ sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64((void *)
+ ((__le32 *)t->rx.buf + 1));
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static const struct scmi_sensor_info *
+scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
+{
+ struct sensors_info *si = handle->sensor_priv;
+
+ return si->sensors + sensor_id;
+}
+
+static int scmi_sensor_count_get(const struct scmi_handle *handle)
+{
+ struct sensors_info *si = handle->sensor_priv;
+
+ return si->num_sensors;
+}
+
+static const struct scmi_sensor_ops sensor_ops = {
+ .count_get = scmi_sensor_count_get,
+ .info_get = scmi_sensor_info_get,
+ .trip_point_config = scmi_sensor_trip_point_config,
+ .reading_get = scmi_sensor_reading_get,
+};
+
+static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
+
+ if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
+ sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+
+ return r;
+}
+
+static const struct scmi_event sensor_events[] = {
+ {
+ .id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
+ .max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
+ .max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
+ },
+};
+
+static const struct scmi_event_ops sensor_event_ops = {
+ .set_notify_enabled = scmi_sensor_set_notify_enabled,
+ .fill_custom_report = scmi_sensor_fill_custom_report,
+};
+
+static int scmi_sensors_protocol_init(struct scmi_handle *handle)
+{
+ u32 version;
+ struct sensors_info *sinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
+
+ dev_dbg(handle->dev, "Sensor Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
+ if (!sinfo)
+ return -ENOMEM;
+
+ scmi_sensor_attributes_get(handle, sinfo);
+
+ sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
+ sizeof(*sinfo->sensors), GFP_KERNEL);
+ if (!sinfo->sensors)
+ return -ENOMEM;
+
+ scmi_sensor_description_get(handle, sinfo);
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
+ &sensor_event_ops, sensor_events,
+ ARRAY_SIZE(sensor_events),
+ sinfo->num_sensors);
+
+ sinfo->version = version;
+ handle->sensor_ops = &sensor_ops;
+ handle->sensor_priv = sinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SENSOR, sensors)
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
new file mode 100644
index 000000000..56a1f61aa
--- /dev/null
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using the shared memory structure.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/processor.h>
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * The SCMI specification requires all parameters, message headers, return
+ * arguments and any other protocol data to be expressed in little-endian
+ * format only.
+ */
+struct scmi_shared_mem {
+ __le32 reserved;
+ __le32 channel_status;
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
+#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
+ __le32 reserved1[2];
+ __le32 flags;
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
+ __le32 length;
+ __le32 msg_header;
+ u8 msg_payload[];
+};
+
+void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+	/*
+	 * Ideally the channel should be free by now, unless the OS timed
+	 * out the last request and the platform is still processing it;
+	 * wait until it releases the shared memory, otherwise we may end up
+	 * overwriting its response with the new message payload or vice versa.
+	 */
+ spin_until_cond(ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &shmem->channel_status);
+ iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+ &shmem->flags);
+ iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
+ iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
+ if (xfer->tx.buf)
+ memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
+{
+ return ioread32(&shmem->msg_header);
+}
+
+void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ size_t len = ioread32(&shmem->length);
+
+ xfer->hdr.status = ioread32(shmem->msg_payload);
+	/* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
+ xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
+
+	/* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+}
+
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ size_t len = ioread32(&shmem->length);
+
+	/* Skip only the length of the header in the shmem area, i.e. 4 bytes */
+ xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
+
+	/* Take a copy into the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
+bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
+ struct scmi_xfer *xfer)
+{
+ u16 xfer_id;
+
+ xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
+
+ if (xfer->hdr.seq != xfer_id)
+ return false;
+
+ return ioread32(&shmem->channel_status) &
+ (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 000000000..82a82a5dc
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @func_id: smc/hvc call function id
+ */
+
+struct scmi_smc {
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct mutex shmem_lock;
+ u32 func_id;
+};
+
+static bool smc_chan_available(struct device *dev, int idx)
+{
+ struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ if (!np)
+ return false;
+
+ of_node_put(np);
+ return true;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct device *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = devm_ioremap(dev, res.start, size);
+ if (!scmi_info->shmem) {
+ dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ mutex_init(&scmi_info->shmem_lock);
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+
+ mutex_lock(&scmi_info->shmem_lock);
+
+ shmem_tx_prepare(scmi_info->shmem, xfer);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+ mutex_unlock(&scmi_info->shmem_lock);
+
+	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
+ if (res.a0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static const struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .fetch_response = smc_fetch_response,
+ .poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 20,
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
new file mode 100644
index 000000000..283e12d5f
--- /dev/null
+++ b/drivers/firmware/arm_scmi/system.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) System Power Protocol
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_SYSTEM_NUM_SOURCES 1
+
+enum scmi_system_protocol_cmd {
+ SYSTEM_POWER_STATE_NOTIFY = 0x5,
+};
+
+struct scmi_system_power_state_notify {
+ __le32 notify_enable;
+};
+
+struct scmi_system_power_state_notifier_payld {
+ __le32 agent_id;
+ __le32 flags;
+ __le32 system_state;
+};
+
+struct scmi_system_info {
+ u32 version;
+};
+
+static int scmi_system_request_notify(const struct scmi_handle *handle,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_system_power_state_notify *notify;
+
+ ret = scmi_xfer_get_init(handle, SYSTEM_POWER_STATE_NOTIFY,
+ SCMI_PROTOCOL_SYSTEM, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_system_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_system_request_notify(handle, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_system_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_system_power_state_notifier_payld *p = payld;
+ struct scmi_system_power_state_notifier_report *r = report;
+
+ if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
+ sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->flags = le32_to_cpu(p->flags);
+ r->system_state = le32_to_cpu(p->system_state);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event system_events[] = {
+ {
+ .id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ .max_payld_sz =
+ sizeof(struct scmi_system_power_state_notifier_payld),
+ .max_report_sz =
+ sizeof(struct scmi_system_power_state_notifier_report),
+ },
+};
+
+static const struct scmi_event_ops system_event_ops = {
+ .set_notify_enabled = scmi_system_set_notify_enabled,
+ .fill_custom_report = scmi_system_fill_custom_report,
+};
+
+static int scmi_system_protocol_init(struct scmi_handle *handle)
+{
+ u32 version;
+ struct scmi_system_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_SYSTEM, &version);
+
+ dev_dbg(handle->dev, "System Power Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_SYSTEM, SCMI_PROTO_QUEUE_SZ,
+ &system_event_ops,
+ system_events,
+ ARRAY_SIZE(system_events),
+ SCMI_SYSTEM_NUM_SOURCES);
+
+ pinfo->version = version;
+ handle->system_priv = pinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SYSTEM, system)
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
new file mode 100644
index 000000000..36391cb51
--- /dev/null
+++ b/drivers/firmware/arm_scpi.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Control and Power Interface (SCPI) Message Protocol driver
+ *
+ * The SCPI Message Protocol is used between the System Control
+ * Processor (SCP) and the Application Processors (AP). The Message
+ * Handling Unit (MHU) provides a mechanism for inter-processor
+ * communication between the SCP's Cortex-M3 and the AP.
+ *
+ * The SCP offers control and management of the core/cluster power states,
+ * DVFS for various power domains including the core/cluster, configuration
+ * of certain system clocks, thermal sensors and many others.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/printk.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/spinlock.h>
+
+#define CMD_ID_MASK GENMASK(6, 0)
+#define CMD_TOKEN_ID_MASK GENMASK(15, 8)
+#define CMD_DATA_SIZE_MASK GENMASK(24, 16)
+#define CMD_LEGACY_DATA_SIZE_MASK GENMASK(28, 20)
+#define PACK_SCPI_CMD(cmd_id, tx_sz) \
+ (FIELD_PREP(CMD_ID_MASK, cmd_id) | \
+ FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
+ (FIELD_PREP(CMD_ID_MASK, cmd_id) | \
+ FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz))
+
+#define CMD_SIZE(cmd) FIELD_GET(CMD_DATA_SIZE_MASK, cmd)
+#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK | CMD_ID_MASK)
+#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
+
+#define SCPI_SLOT 0
+
+#define MAX_DVFS_DOMAINS 8
+#define MAX_DVFS_OPPS 16
+
+#define PROTO_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTO_REV_MINOR_MASK GENMASK(15, 0)
+
+#define FW_REV_MAJOR_MASK GENMASK(31, 24)
+#define FW_REV_MINOR_MASK GENMASK(23, 16)
+#define FW_REV_PATCH_MASK GENMASK(15, 0)
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
+
+enum scpi_error_codes {
+ SCPI_SUCCESS = 0, /* Success */
+ SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
+ SCPI_ERR_ALIGN = 2, /* Invalid alignment */
+ SCPI_ERR_SIZE = 3, /* Invalid size */
+ SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
+ SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
+ SCPI_ERR_RANGE = 6, /* Value out of range */
+ SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
+ SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
+ SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
+ SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
+ SCPI_ERR_DEVICE = 11, /* Device error */
+ SCPI_ERR_BUSY = 12, /* Device busy */
+ SCPI_ERR_MAX
+};
+
+/* SCPI Standard commands */
+enum scpi_std_cmd {
+ SCPI_CMD_INVALID = 0x00,
+ SCPI_CMD_SCPI_READY = 0x01,
+ SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ SCPI_CMD_SET_CSS_PWR_STATE = 0x03,
+ SCPI_CMD_GET_CSS_PWR_STATE = 0x04,
+ SCPI_CMD_SET_SYS_PWR_STATE = 0x05,
+ SCPI_CMD_SET_CPU_TIMER = 0x06,
+ SCPI_CMD_CANCEL_CPU_TIMER = 0x07,
+ SCPI_CMD_DVFS_CAPABILITIES = 0x08,
+ SCPI_CMD_GET_DVFS_INFO = 0x09,
+ SCPI_CMD_SET_DVFS = 0x0a,
+ SCPI_CMD_GET_DVFS = 0x0b,
+ SCPI_CMD_GET_DVFS_STAT = 0x0c,
+ SCPI_CMD_CLOCK_CAPABILITIES = 0x0d,
+ SCPI_CMD_GET_CLOCK_INFO = 0x0e,
+ SCPI_CMD_SET_CLOCK_VALUE = 0x0f,
+ SCPI_CMD_GET_CLOCK_VALUE = 0x10,
+ SCPI_CMD_PSU_CAPABILITIES = 0x11,
+ SCPI_CMD_GET_PSU_INFO = 0x12,
+ SCPI_CMD_SET_PSU = 0x13,
+ SCPI_CMD_GET_PSU = 0x14,
+ SCPI_CMD_SENSOR_CAPABILITIES = 0x15,
+ SCPI_CMD_SENSOR_INFO = 0x16,
+ SCPI_CMD_SENSOR_VALUE = 0x17,
+ SCPI_CMD_SENSOR_CFG_PERIODIC = 0x18,
+ SCPI_CMD_SENSOR_CFG_BOUNDS = 0x19,
+ SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1a,
+ SCPI_CMD_SET_DEVICE_PWR_STATE = 0x1b,
+ SCPI_CMD_GET_DEVICE_PWR_STATE = 0x1c,
+ SCPI_CMD_COUNT
+};
+
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+ LEGACY_SCPI_CMD_INVALID = 0x00,
+ LEGACY_SCPI_CMD_SCPI_READY = 0x01,
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ LEGACY_SCPI_CMD_EVENT = 0x03,
+ LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
+ LEGACY_SCPI_CMD_L2_READY = 0x09,
+ LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
+ LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
+ LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
+ LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
+ LEGACY_SCPI_CMD_SET_RTC = 0x11,
+ LEGACY_SCPI_CMD_GET_RTC = 0x12,
+ LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ LEGACY_SCPI_CMD_SET_PSU = 0x18,
+ LEGACY_SCPI_CMD_GET_PSU = 0x19,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
+ LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+ LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+ LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_RTC,
+ LEGACY_SCPI_CMD_GET_RTC,
+ LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_PSU,
+ LEGACY_SCPI_CMD_GET_PSU,
+ LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+ LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+ CMD_SCPI_CAPABILITIES = 0,
+ CMD_GET_CLOCK_INFO,
+ CMD_GET_CLOCK_VALUE,
+ CMD_SET_CLOCK_VALUE,
+ CMD_GET_DVFS,
+ CMD_SET_DVFS,
+ CMD_GET_DVFS_INFO,
+ CMD_SENSOR_CAPABILITIES,
+ CMD_SENSOR_INFO,
+ CMD_SENSOR_VALUE,
+ CMD_SET_DEVICE_PWR_STATE,
+ CMD_GET_DEVICE_PWR_STATE,
+ CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+ SCPI_CMD_SCPI_CAPABILITIES,
+ SCPI_CMD_GET_CLOCK_INFO,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS_INFO,
+ SCPI_CMD_SENSOR_CAPABILITIES,
+ SCPI_CMD_SENSOR_INFO,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SET_DEVICE_PWR_STATE,
+ SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+ LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+ -1, /* GET_CLOCK_INFO */
+ LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+ LEGACY_SCPI_CMD_GET_DVFS,
+ LEGACY_SCPI_CMD_SET_DVFS,
+ LEGACY_SCPI_CMD_GET_DVFS_INFO,
+ LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+ LEGACY_SCPI_CMD_SENSOR_INFO,
+ LEGACY_SCPI_CMD_SENSOR_VALUE,
+ -1, /* SET_DEVICE_PWR_STATE */
+ -1, /* GET_DEVICE_PWR_STATE */
+};
+
+struct scpi_xfer {
+ u32 slot; /* has to be first element */
+ u32 cmd;
+ u32 status;
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned int tx_len;
+ unsigned int rx_len;
+ struct list_head node;
+ struct completion done;
+};
+
+struct scpi_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ void __iomem *tx_payload;
+ void __iomem *rx_payload;
+ struct list_head rx_pending;
+ struct list_head xfers_list;
+ struct scpi_xfer *xfers;
+ spinlock_t rx_lock; /* locking for the rx pending list */
+ struct mutex xfers_lock;
+ u8 token;
+};
+
+struct scpi_drvinfo {
+ u32 protocol_version;
+ u32 firmware_version;
+ bool is_legacy;
+ int num_chans;
+ int *commands;
+ DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
+ atomic_t next_chan;
+ struct scpi_ops *scpi_ops;
+ struct scpi_chan *channels;
+ struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
+};
+
+/*
+ * The SCP firmware only executes in little-endian mode, so any buffers
+ * shared through SCPI should have their contents converted to little-endian.
+ */
+struct scpi_shared_mem {
+ __le32 command;
+ __le32 status;
+ u8 payload[];
+} __packed;
+
+struct legacy_scpi_shared_mem {
+ __le32 status;
+ u8 payload[];
+} __packed;
+
+struct scp_capabilities {
+ __le32 protocol_version;
+ __le32 event_version;
+ __le32 platform_version;
+ __le32 commands[4];
+} __packed;
+
+struct clk_get_info {
+ __le16 id;
+ __le16 flags;
+ __le32 min_rate;
+ __le32 max_rate;
+ u8 name[20];
+} __packed;
+
+struct clk_set_value {
+ __le16 id;
+ __le16 reserved;
+ __le32 rate;
+} __packed;
+
+struct legacy_clk_set_value {
+ __le32 rate;
+ __le16 id;
+ __le16 reserved;
+} __packed;
+
+struct dvfs_info {
+ u8 domain;
+ u8 opp_count;
+ __le16 latency;
+ struct {
+ __le32 freq;
+ __le32 m_volt;
+ } opps[MAX_DVFS_OPPS];
+} __packed;
+
+struct dvfs_set {
+ u8 domain;
+ u8 index;
+} __packed;
+
+struct _scpi_sensor_info {
+ __le16 sensor_id;
+ u8 class;
+ u8 trigger_type;
+ char name[20];
+};
+
+struct dev_pstate_set {
+ __le16 dev_id;
+ u8 pstate;
+} __packed;
+
+static struct scpi_drvinfo *scpi_info;
+
+static int scpi_linux_errmap[SCPI_ERR_MAX] = {
+	/* better than a switch-case as long as the error codes are contiguous */
+ 0, /* SCPI_SUCCESS */
+ -EINVAL, /* SCPI_ERR_PARAM */
+ -ENOEXEC, /* SCPI_ERR_ALIGN */
+ -EMSGSIZE, /* SCPI_ERR_SIZE */
+ -EINVAL, /* SCPI_ERR_HANDLER */
+ -EACCES, /* SCPI_ERR_ACCESS */
+ -ERANGE, /* SCPI_ERR_RANGE */
+ -ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
+ -ENOMEM, /* SCPI_ERR_NOMEM */
+ -EINVAL, /* SCPI_ERR_PWRSTATE */
+ -EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
+ -EIO, /* SCPI_ERR_DEVICE */
+ -EBUSY, /* SCPI_ERR_BUSY */
+};
+
+static inline int scpi_to_linux_errno(int errno)
+{
+ if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
+ return scpi_linux_errmap[errno];
+ return -EIO;
+}
+
+static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
+{
+ unsigned long flags;
+ struct scpi_xfer *t, *match = NULL;
+
+ spin_lock_irqsave(&ch->rx_lock, flags);
+ if (list_empty(&ch->rx_pending)) {
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+ return;
+ }
+
+	/* The command type is not echoed back by the SCP firmware in legacy
+	 * mode, so treat the head of the pending RX list (if not empty) as
+	 * the command being completed. In TX-only mode the list is empty.
+	 */
+ if (scpi_info->is_legacy) {
+ match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+ node);
+ list_del(&match->node);
+ } else {
+ list_for_each_entry(t, &ch->rx_pending, node)
+ if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+ list_del(&t->node);
+ match = t;
+ break;
+ }
+ }
+	/* check if wait_for_completion is in progress or has timed out */
+ if (match && !completion_done(&match->done)) {
+ unsigned int len;
+
+ if (scpi_info->is_legacy) {
+ struct legacy_scpi_shared_mem __iomem *mem =
+ ch->rx_payload;
+
+			/* The RX length is not reported by legacy firmware */
+ len = match->rx_len;
+
+ match->status = ioread32(&mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ } else {
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+
+ len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd));
+
+ match->status = ioread32(&mem->status);
+ memcpy_fromio(match->rx_buf, mem->payload, len);
+ }
+
+ if (match->rx_len > len)
+ memset(match->rx_buf + len, 0, match->rx_len - len);
+ complete(&match->done);
+ }
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+}
+
+static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
+{
+ struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
+ u32 cmd = 0;
+
+ if (!scpi_info->is_legacy)
+ cmd = ioread32(&mem->command);
+
+ scpi_process_cmd(ch, cmd);
+}
+
+static void scpi_tx_prepare(struct mbox_client *c, void *msg)
+{
+ unsigned long flags;
+ struct scpi_xfer *t = msg;
+ struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
+ struct scpi_shared_mem __iomem *mem = ch->tx_payload;
+
+ if (t->tx_buf) {
+ if (scpi_info->is_legacy)
+ memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+ else
+ memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+ }
+
+ if (t->rx_buf) {
+ if (!(++ch->token))
+ ++ch->token;
+ t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
+ spin_lock_irqsave(&ch->rx_lock, flags);
+ list_add_tail(&t->node, &ch->rx_pending);
+ spin_unlock_irqrestore(&ch->rx_lock, flags);
+ }
+
+ if (!scpi_info->is_legacy)
+ iowrite32(t->cmd, &mem->command);
+}
+
+static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
+{
+ struct scpi_xfer *t;
+
+ mutex_lock(&ch->xfers_lock);
+ if (list_empty(&ch->xfers_list)) {
+ mutex_unlock(&ch->xfers_lock);
+ return NULL;
+ }
+ t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
+ list_del(&t->node);
+ mutex_unlock(&ch->xfers_lock);
+ return t;
+}
+
+static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
+{
+ mutex_lock(&ch->xfers_lock);
+ list_add_tail(&t->node, &ch->xfers_list);
+ mutex_unlock(&ch->xfers_lock);
+}
+
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
+ void *rx_buf, unsigned int rx_len)
+{
+ int ret;
+ u8 chan;
+ u8 cmd;
+ struct scpi_xfer *msg;
+ struct scpi_chan *scpi_chan;
+
+ if (scpi_info->commands[idx] < 0)
+ return -EOPNOTSUPP;
+
+ cmd = scpi_info->commands[idx];
+
+ if (scpi_info->is_legacy)
+ chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+ else
+ chan = atomic_inc_return(&scpi_info->next_chan) %
+ scpi_info->num_chans;
+ scpi_chan = scpi_info->channels + chan;
+
+ msg = get_scpi_xfer(scpi_chan);
+ if (!msg)
+ return -ENOMEM;
+
+ if (scpi_info->is_legacy) {
+ msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+ msg->slot = msg->cmd;
+ } else {
+ msg->slot = BIT(SCPI_SLOT);
+ msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+ }
+ msg->tx_buf = tx_buf;
+ msg->tx_len = tx_len;
+ msg->rx_buf = rx_buf;
+ msg->rx_len = rx_len;
+ reinit_completion(&msg->done);
+
+ ret = mbox_send_message(scpi_chan->chan, msg);
+ if (ret < 0 || !rx_buf)
+ goto out;
+
+ if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
+ ret = -ETIMEDOUT;
+ else
+ /* first status word */
+ ret = msg->status;
+out:
+ if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
+ scpi_process_cmd(scpi_chan, msg->cmd);
+
+ put_scpi_xfer(msg, scpi_chan);
+	/* SCPI error codes > 0, translate them to the Linux scale */
+ return ret > 0 ? scpi_to_linux_errno(ret) : ret;
+}
+
+static u32 scpi_get_version(void)
+{
+ return scpi_info->protocol_version;
+}
+
+static int
+scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
+{
+ int ret;
+ struct clk_get_info clk;
+ __le16 le_clk_id = cpu_to_le16(clk_id);
+
+ ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
+ sizeof(le_clk_id), &clk, sizeof(clk));
+ if (!ret) {
+ *min = le32_to_cpu(clk.min_rate);
+ *max = le32_to_cpu(clk.max_rate);
+ }
+ return ret;
+}
+
+static unsigned long scpi_clk_get_val(u16 clk_id)
+{
+ int ret;
+ __le32 rate;
+ __le16 le_clk_id = cpu_to_le16(clk_id);
+
+ ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
+ sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
+
+ return le32_to_cpu(rate);
+}
+
+static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ int stat;
+ struct legacy_clk_set_value clk = {
+ .id = cpu_to_le16(clk_id),
+ .rate = cpu_to_le32(rate)
+ };
+
+ return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+ &stat, sizeof(stat));
+}
+
+static int scpi_dvfs_get_idx(u8 domain)
+{
+ int ret;
+ u8 dvfs_idx;
+
+ ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
+ &dvfs_idx, sizeof(dvfs_idx));
+
+ return ret ? ret : dvfs_idx;
+}
+
+static int scpi_dvfs_set_idx(u8 domain, u8 index)
+{
+ int stat;
+ struct dvfs_set dvfs = {domain, index};
+
+ return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+ &stat, sizeof(stat));
+}
+
+static int opp_cmp_func(const void *opp1, const void *opp2)
+{
+ const struct scpi_opp *t1 = opp1, *t2 = opp2;
+
+ return t1->freq - t2->freq;
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
+{
+ struct scpi_dvfs_info *info;
+ struct scpi_opp *opp;
+ struct dvfs_info buf;
+ int ret, i;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return ERR_PTR(-EINVAL);
+
+ if (scpi_info->dvfs[domain]) /* data already populated */
+ return scpi_info->dvfs[domain];
+
+ ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+ &buf, sizeof(buf));
+ if (ret)
+ return ERR_PTR(ret);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->count = buf.opp_count;
+	info->latency = le16_to_cpu(buf.latency) * 1000; /* us to ns */
+
+ info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
+ if (!info->opps) {
+ kfree(info);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
+ opp->freq = le32_to_cpu(buf.opps[i].freq);
+ opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
+ }
+
+ sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
+
+ scpi_info->dvfs[domain] = info;
+ return info;
+}
+
+static int scpi_dev_domain_id(struct device *dev)
+{
+ struct of_phandle_args clkspec;
+
+ if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+ 0, &clkspec))
+ return -EINVAL;
+
+ return clkspec.args[0];
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_info(struct device *dev)
+{
+ int domain = scpi_dev_domain_id(dev);
+
+ if (domain < 0)
+ return ERR_PTR(domain);
+
+ return scpi_dvfs_get_info(domain);
+}
+
+static int scpi_dvfs_get_transition_latency(struct device *dev)
+{
+ struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ return info->latency;
+}
+
+static int scpi_dvfs_add_opps_to_device(struct device *dev)
+{
+ int idx, ret;
+ struct scpi_opp *opp;
+ struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ if (!info->opps)
+ return -EIO;
+
+ for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
+ ret = dev_pm_opp_add(dev, opp->freq, opp->m_volt * 1000);
+ if (ret) {
+ dev_warn(dev, "failed to add opp %uHz %umV\n",
+ opp->freq, opp->m_volt);
+ while (idx-- > 0)
+ dev_pm_opp_remove(dev, (--opp)->freq);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int scpi_sensor_get_capability(u16 *sensors)
+{
+ __le16 cap;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap,
+ sizeof(cap));
+ if (!ret)
+ *sensors = le16_to_cpu(cap);
+
+ return ret;
+}
+
+static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
+{
+ __le16 id = cpu_to_le16(sensor_id);
+ struct _scpi_sensor_info _info;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
+ &_info, sizeof(_info));
+ if (!ret) {
+ memcpy(info, &_info, sizeof(*info));
+ info->sensor_id = le16_to_cpu(_info.sensor_id);
+ }
+
+ return ret;
+}
+
+static int scpi_sensor_get_value(u16 sensor, u64 *val)
+{
+ __le16 id = cpu_to_le16(sensor);
+ __le64 value;
+ int ret;
+
+ ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
+ &value, sizeof(value));
+ if (ret)
+ return ret;
+
+ if (scpi_info->is_legacy)
+		/* only 32 bits are supported, the upper 32 bits can be junk */
+ *val = le32_to_cpup((__le32 *)&value);
+ else
+ *val = le64_to_cpu(value);
+
+ return 0;
+}
+
+static int scpi_device_get_power_state(u16 dev_id)
+{
+ int ret;
+ u8 pstate;
+ __le16 id = cpu_to_le16(dev_id);
+
+ ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
+ sizeof(id), &pstate, sizeof(pstate));
+ return ret ? ret : pstate;
+}
+
+static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
+{
+ int stat;
+ struct dev_pstate_set dev_set = {
+ .dev_id = cpu_to_le16(dev_id),
+ .pstate = pstate,
+ };
+
+ return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ sizeof(dev_set), &stat, sizeof(stat));
+}
+
+static struct scpi_ops scpi_ops = {
+ .get_version = scpi_get_version,
+ .clk_get_range = scpi_clk_get_range,
+ .clk_get_val = scpi_clk_get_val,
+ .clk_set_val = scpi_clk_set_val,
+ .dvfs_get_idx = scpi_dvfs_get_idx,
+ .dvfs_set_idx = scpi_dvfs_set_idx,
+ .dvfs_get_info = scpi_dvfs_get_info,
+ .device_domain_id = scpi_dev_domain_id,
+ .get_transition_latency = scpi_dvfs_get_transition_latency,
+ .add_opps_to_device = scpi_dvfs_add_opps_to_device,
+ .sensor_get_capability = scpi_sensor_get_capability,
+ .sensor_get_info = scpi_sensor_get_info,
+ .sensor_get_value = scpi_sensor_get_value,
+ .device_get_power_state = scpi_device_get_power_state,
+ .device_set_power_state = scpi_device_set_power_state,
+};
+
+struct scpi_ops *get_scpi_ops(void)
+{
+ return scpi_info ? scpi_info->scpi_ops : NULL;
+}
+EXPORT_SYMBOL_GPL(get_scpi_ops);
+
+static int scpi_init_versions(struct scpi_drvinfo *info)
+{
+ int ret;
+ struct scp_capabilities caps;
+
+ ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
+ &caps, sizeof(caps));
+ if (!ret) {
+ info->protocol_version = le32_to_cpu(caps.protocol_version);
+ info->firmware_version = le32_to_cpu(caps.platform_version);
+ }
+ /* Ignore error if not implemented */
+ if (info->is_legacy && ret == -EOPNOTSUPP)
+ return 0;
+
+ return ret;
+}
+
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu.%lu\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu.%lu.%lu\n",
+ FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
+static void scpi_free_channels(void *data)
+{
+ struct scpi_drvinfo *info = data;
+ int i;
+
+ for (i = 0; i < info->num_chans; i++)
+ mbox_free_channel(info->channels[i].chan);
+}
+
+static int scpi_remove(struct platform_device *pdev)
+{
+ int i;
+ struct scpi_drvinfo *info = platform_get_drvdata(pdev);
+
+ scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
+
+ for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
+ kfree(info->dvfs[i]->opps);
+ kfree(info->dvfs[i]);
+ }
+
+ return 0;
+}
+
+#define MAX_SCPI_XFERS 10
+static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
+{
+ int i;
+ struct scpi_xfer *xfers;
+
+ xfers = devm_kcalloc(dev, MAX_SCPI_XFERS, sizeof(*xfers), GFP_KERNEL);
+ if (!xfers)
+ return -ENOMEM;
+
+ ch->xfers = xfers;
+ for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) {
+ init_completion(&xfers->done);
+ list_add_tail(&xfers->node, &ch->xfers_list);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id legacy_scpi_of_match[] = {
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
+static int scpi_probe(struct platform_device *pdev)
+{
+ int count, idx, ret;
+ struct resource res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_drvinfo *scpi_drvinfo;
+
+ scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
+ if (!scpi_drvinfo)
+ return -ENOMEM;
+
+ if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+ scpi_drvinfo->is_legacy = true;
+
+ count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ if (count < 0) {
+ dev_err(dev, "no mboxes property in '%pOF'\n", np);
+ return -ENODEV;
+ }
+
+ scpi_drvinfo->channels =
+ devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
+ if (!scpi_drvinfo->channels)
+ return -ENOMEM;
+
+ ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
+ if (ret)
+ return ret;
+
+ for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
+ resource_size_t size;
+ int idx = scpi_drvinfo->num_chans;
+ struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
+ struct mbox_client *cl = &pchan->cl;
+ struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
+ dev_err(dev, "failed to get SCPI payload mem resource\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ pchan->rx_payload = devm_ioremap(dev, res.start, size);
+ if (!pchan->rx_payload) {
+ dev_err(dev, "failed to ioremap SCPI payload\n");
+ return -EADDRNOTAVAIL;
+ }
+ pchan->tx_payload = pchan->rx_payload + (size >> 1);
+
+ cl->dev = dev;
+ cl->rx_callback = scpi_handle_remote_msg;
+ cl->tx_prepare = scpi_tx_prepare;
+ cl->tx_block = true;
+ cl->tx_tout = 20;
+ cl->knows_txdone = false; /* controller can't ack */
+
+ INIT_LIST_HEAD(&pchan->rx_pending);
+ INIT_LIST_HEAD(&pchan->xfers_list);
+ spin_lock_init(&pchan->rx_lock);
+ mutex_init(&pchan->xfers_lock);
+
+ ret = scpi_alloc_xfer_list(dev, pchan);
+ if (!ret) {
+ pchan->chan = mbox_request_channel(cl, idx);
+ if (!IS_ERR(pchan->chan))
+ continue;
+ ret = PTR_ERR(pchan->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get channel%d err %d\n",
+ idx, ret);
+ }
+ return ret;
+ }
+
+ scpi_drvinfo->commands = scpi_std_commands;
+
+ platform_set_drvdata(pdev, scpi_drvinfo);
+
+ if (scpi_drvinfo->is_legacy) {
+ /* Replace with legacy variants */
+ scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+ scpi_drvinfo->commands = scpi_legacy_commands;
+
+ /* Fill priority bitmap */
+ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+ set_bit(legacy_hpriority_cmds[idx],
+ scpi_drvinfo->cmd_priority);
+ }
+
+ scpi_info = scpi_drvinfo;
+
+ ret = scpi_init_versions(scpi_drvinfo);
+ if (ret) {
+ dev_err(dev, "incorrect or no SCP firmware found\n");
+ scpi_info = NULL;
+ return ret;
+ }
+
+ if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
+ !scpi_drvinfo->firmware_version)
+ dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
+ else
+ dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK,
+ scpi_drvinfo->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK,
+ scpi_drvinfo->protocol_version),
+ FIELD_GET(FW_REV_MAJOR_MASK,
+ scpi_drvinfo->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK,
+ scpi_drvinfo->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK,
+ scpi_drvinfo->firmware_version));
+
+ scpi_drvinfo->scpi_ops = &scpi_ops;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ scpi_info = NULL;
+
+ return ret;
+}
+
+static const struct of_device_id scpi_of_match[] = {
+ {.compatible = "arm,scpi"},
+ {.compatible = "arm,scpi-pre-1.0"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, scpi_of_match);
+
+static struct platform_driver scpi_driver = {
+ .driver = {
+ .name = "scpi_protocol",
+ .of_match_table = scpi_of_match,
+ .dev_groups = versions_groups,
+ },
+ .probe = scpi_probe,
+ .remove = scpi_remove,
+};
+module_platform_driver(scpi_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 000000000..68e55ca74
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <acpi/ghes.h>
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
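+/* Dynamic CPU hotplug state returned by cpuhp_setup_state() */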
+static int sdei_hp_state;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/* Take the mutex for any API call or modification. Take the mutex first. */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event) \
+ do { \
+ arg.event = event; \
+ arg.first_error = 0; \
+ atomic_set(&arg.errors, 0); \
+ } while (0)
+
+static inline int sdei_do_local_call(smp_call_func_t fn,
+ struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ fn(&arg);
+
+ return arg.first_error;
+}
+
+static inline int sdei_do_cross_call(smp_call_func_t fn,
+ struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+NOKPROBE_SYMBOL(invoke_sdei_fn);
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err)
+ goto fail;
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err)
+ goto fail;
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+
+ return event;
+
+fail:
+ kfree(event);
+ return ERR_PTR(err);
+}
+
+static void sdei_event_destroy_llocked(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+ lockdep_assert_held(&sdei_list_lock);
+
+ list_del(&event->list);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ spin_lock(&sdei_list_lock);
+ sdei_event_destroy_llocked(event);
+ spin_unlock(&sdei_list_lock);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ WARN_ON_ONCE(preemptible());
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ WARN_ON_ONCE(preemptible());
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+
+ if (!err) {
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+ }
+ cpus_read_unlock();
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_unregister(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_unregister, event);
+
+ if (err)
+ goto unlock;
+
+ sdei_event_destroy(event);
+unlock:
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+/*
+ * unregister events, but don't destroy them as they are re-registered by
+ * sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = sdei_api_event_unregister(event->event_num);
+ if (err)
+ break;
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num, err);
+ goto unlock;
+ }
+
+ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ } else {
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err)
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num, err);
+ goto cpu_unlock;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+cpu_unlock:
+ cpus_read_unlock();
+unlock:
+ mutex_unlock(&sdei_events_lock);
+ return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point, event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ if (err) {
+ pr_err("Failed to re-register event %u\n",
+ event->event_num);
+ sdei_event_destroy_llocked(event);
+ break;
+ }
+ }
+
+ if (event->reenable) {
+ err = sdei_api_event_enable(event->event_num);
+ if (err) {
+ pr_err("Failed to re-enable event %u\n",
+ event->event_num);
+ break;
+ }
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ int err;
+
+ /* un-register private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = sdei_do_local_call(_local_event_unregister, event);
+ if (err) {
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ int err;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_do_local_call(_local_event_register, event);
+ if (err) {
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+
+ if (event->reenable) {
+ err = sdei_do_local_call(_local_event_enable, event);
+ if (err) {
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, err);
+ }
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this cpu */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ WARN_ON_ONCE(preemptible());
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+ cpuhp_remove_state(sdei_hp_state);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err < 0) {
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+ return err;
+ }
+
+ sdei_hp_state = err;
+ return 0;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface; after this there is no point
+ * in doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(sdei_hp_state);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+NOKPROBE_SYMBOL(sdei_smccc_smc);
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
+
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb)
+{
+ int err;
+ u64 result;
+ u32 event_num;
+ sdei_event_callback *cb;
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ event_num = ghes->generic->notify.vector;
+ if (event_num == 0) {
+ /*
+ * Event 0 is reserved by the specification for
+ * SDEI_EVENT_SIGNAL.
+ */
+ return -EINVAL;
+ }
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err)
+ return err;
+
+ if (result == SDEI_EVENT_PRIORITY_CRITICAL)
+ cb = critical_cb;
+ else
+ cb = normal_cb;
+
+ err = sdei_event_register(event_num, cb, ghes);
+ if (!err)
+ err = sdei_event_enable(event_num);
+
+ return err;
+}
+
+int sdei_unregister_ghes(struct ghes *ghes)
+{
+ int i;
+ int err;
+ u32 event_num = ghes->generic->notify.vector;
+
+ might_sleep();
+
+ if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
+ return -EOPNOTSUPP;
+
+ /*
+ * The event may be running on another CPU. Disable it
+ * to stop new events, then try to unregister a few times.
+ */
+ err = sdei_event_disable(event_num);
+ if (err)
+ return err;
+
+ for (i = 0; i < 3; i++) {
+ err = sdei_event_unregister(event_num);
+ if (err != -EINPROGRESS)
+ break;
+
+ schedule();
+ }
+
+ return err;
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return SMCCC_CONDUIT_NONE;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return SMCCC_CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return SMCCC_CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (!acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return SMCCC_CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return SMCCC_CONDUIT_SMC;
+ }
+ }
+
+ return SMCCC_CONDUIT_NONE;
+}
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Conflicting SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err < 0) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ sdei_hp_state = err;
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ acpi_put_table(sdei_table_header);
+
+ return true;
+}
+
+void __init sdei_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ ret = platform_driver_register(&sdei_driver);
+ if (ret || !sdei_present_acpi())
+ return;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ platform_driver_unregister(&sdei_driver);
+ pr_info("Failed to register ACPI:SDEI platform device %d\n",
+ ret);
+ }
+}
+
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ mm_segment_t orig_addr_limit;
+ u32 event_num = arg->event_num;
+
+ /*
+ * Save and restore 'fs'.
+ * The architecture's entry code saves/restores 'fs' when taking an
+ * exception from the kernel. This ensures addr_limit isn't inherited
+ * if you interrupted something that allowed the uaccess routines to
+ * access kernel memory.
+ * Do the same here because this doesn't come via the same entry code.
+ */
+ orig_addr_limit = force_uaccess_begin();
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ force_uaccess_end(orig_addr_limit);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
+
+void sdei_handler_abort(void)
+{
+ /*
+ * If the crash happened in an SDEI event handler then we need to
+ * finish the handler with the firmware so that we can have working
+ * interrupts in the crash kernel.
+ */
+ if (__this_cpu_read(sdei_active_critical_event)) {
+ pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_critical_event, NULL);
+ }
+ if (__this_cpu_read(sdei_active_normal_event)) {
+ pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_normal_event, NULL);
+ }
+}
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
new file mode 100644
index 000000000..8e3d355a6
--- /dev/null
+++ b/drivers/firmware/broadcom/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config BCM47XX_NVRAM
+ bool "Broadcom NVRAM driver"
+ depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
+ help
+ Broadcom home routers contain a flash partition called "nvram" with all
+ important hardware configuration as well as some minor user setup.
+ The NVRAM partition contains text-like data representing name=value
+ pairs.
+ This driver provides an easy way to get the value of a requested
+ parameter. It simply reads the content of NVRAM and parses it. It
+ doesn't control any hardware itself.
+
+config BCM47XX_SPROM
+ bool "Broadcom SPROM driver"
+ depends on BCM47XX_NVRAM
+ select GENERIC_NET_UTILS
+ help
+ Broadcom devices store configuration data in SPROM. Accessing it is
+ specific to the bus host type, e.g. PCI(e) devices have it mapped in
+ a PCI BAR.
+ In the case of SoC devices, the SPROM content is stored on flash used by
+ the bootloader firmware CFE. This driver provides a method for the ssb
+ and bcma drivers to read the SPROM on SoCs.
+
+config TEE_BNXT_FW
+ tristate "Broadcom BNXT firmware manager"
+ depends on (ARCH_BCM_IPROC && OPTEE) || (COMPILE_TEST && TEE)
+ default ARCH_BCM_IPROC
+ help
+ This module helps to manage firmware on a Broadcom BNXT device. The module
+ registers on the tee bus and invokes calls to manage firmware on the device.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
new file mode 100644
index 000000000..17c5061c4
--- /dev/null
+++ b/drivers/firmware/broadcom/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
+obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
+obj-$(CONFIG_TEE_BNXT_FW) += tee_bnxt_fw.o
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
new file mode 100644
index 000000000..835ece9c0
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM947xx nvram variable access
+ *
+ * Copyright (C) 2005 Broadcom Corporation
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/bcm47xx_nvram.h>
+
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_SPACE 0x10000
+#define NVRAM_MAX_GPIO_ENTRIES 32
+#define NVRAM_MAX_GPIO_VALUE_LEN 30
+
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+struct nvram_header {
+ u32 magic;
+ u32 len;
+ u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ u32 config_ncdl; /* ncdl values for memc */
+};
+
+static char nvram_buf[NVRAM_SPACE];
+static size_t nvram_len;
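+/* Candidate NVRAM sizes; the header is probed for at the end of each window */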
+static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000};
+
+static u32 find_nvram_size(void __iomem *end)
+{
+ struct nvram_header __iomem *header;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+ header = (struct nvram_header *)(end - nvram_sizes[i]);
+ if (header->magic == NVRAM_MAGIC)
+ return nvram_sizes[i];
+ }
+
+ return 0;
+}
+
+/* Probe for NVRAM header */
+static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
+{
+ struct nvram_header __iomem *header;
+ u32 off;
+ u32 size;
+
+ if (nvram_len) {
+ pr_warn("nvram already initialized\n");
+ return -EEXIST;
+ }
+
+ /* TODO: when nvram is on nand flash check for bad blocks first. */
+ off = FLASH_MIN;
+ while (off <= lim) {
+ /* Windowed flash access */
+ size = find_nvram_size(iobase + off);
+ if (size) {
+ header = (struct nvram_header *)(iobase + off - size);
+ goto found;
+ }
+ off <<= 1;
+ }
+
+ /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
+ header = (struct nvram_header *)(iobase + 4096);
+ if (header->magic == NVRAM_MAGIC) {
+ size = NVRAM_SPACE;
+ goto found;
+ }
+
+ header = (struct nvram_header *)(iobase + 1024);
+ if (header->magic == NVRAM_MAGIC) {
+ size = NVRAM_SPACE;
+ goto found;
+ }
+
+ pr_err("no nvram found\n");
+ return -ENXIO;
+
+found:
+ __ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
+ nvram_len = ((struct nvram_header *)(nvram_buf))->len;
+ if (nvram_len > size) {
+ pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
+ nvram_len = size;
+ }
+ if (nvram_len >= NVRAM_SPACE) {
+ pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ nvram_len, NVRAM_SPACE - 1);
+ nvram_len = NVRAM_SPACE - 1;
+ }
+ /* proceed reading data after header */
+ __ioread32_copy(nvram_buf + sizeof(*header), header + 1,
+ DIV_ROUND_UP(nvram_len, 4));
+ nvram_buf[NVRAM_SPACE - 1] = '\0';
+
+ return 0;
+}
+
+/*
+ * On bcm47xx we need access to the NVRAM very early, so we can't use the mtd
+ * subsystem to access flash. We can't even use a platform device / driver to
+ * store the memory offset.
+ * To handle this we provide the following symbol. It's supposed to be called
+ * as soon as we get info about the flash device, before any NVRAM entry is needed.
+ */
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ void __iomem *iobase;
+ int err;
+
+ iobase = ioremap(base, lim);
+ if (!iobase)
+ return -ENOMEM;
+
+ err = nvram_find_and_copy(iobase, lim);
+
+ iounmap(iobase);
+
+ return err;
+}
+
+static int nvram_init(void)
+{
+#ifdef CONFIG_MTD
+ struct mtd_info *mtd;
+ struct nvram_header header;
+ size_t bytes_read;
+ int err;
+
+ mtd = get_mtd_device_nm("nvram");
+ if (IS_ERR(mtd))
+ return -ENODEV;
+
+ err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (!err && header.magic == NVRAM_MAGIC &&
+ header.len > sizeof(header)) {
+ nvram_len = header.len;
+ if (nvram_len >= NVRAM_SPACE) {
+ pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ nvram_len, NVRAM_SPACE);
+ nvram_len = NVRAM_SPACE - 1;
+ }
+
+ err = mtd_read(mtd, 0, nvram_len, &nvram_len,
+ (u8 *)nvram_buf);
+ return err;
+ }
+#endif
+
+ return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
+{
+ char *var, *value, *end, *eq;
+ int err;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!nvram_len) {
+ err = nvram_init();
+ if (err)
+ return err;
+ }
+
+ /* Look for name=value and return value */
+ var = &nvram_buf[sizeof(struct nvram_header)];
+ end = nvram_buf + sizeof(nvram_buf);
+ while (var < end && *var) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ value = eq + 1;
+ if (eq - var == strlen(name) &&
+ strncmp(var, name, eq - var) == 0)
+ return snprintf(val, val_len, "%s", value);
+ var = value + strlen(value) + 1;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ int i, err;
+ char nvram_var[] = "gpioXX";
+ char buf[NVRAM_MAX_GPIO_VALUE_LEN];
+
+ /* TODO: Optimize this to avoid calling getenv so many times */
+ for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) {
+ err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+ if (err <= 0)
+ continue;
+ err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+ if (err <= 0)
+ continue;
+ if (!strcmp(name, buf))
+ return i;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
+
+char *bcm47xx_nvram_get_contents(size_t *nvram_size)
+{
+ int err;
+ char *nvram;
+
+ if (!nvram_len) {
+ err = nvram_init();
+ if (err)
+ return NULL;
+ }
+
+ *nvram_size = nvram_len - sizeof(struct nvram_header);
+ nvram = vmalloc(*nvram_size);
+ if (!nvram)
+ return NULL;
+ memcpy(nvram, &nvram_buf[sizeof(struct nvram_header)], *nvram_size);
+
+ return nvram;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
new file mode 100644
index 000000000..14fbcd116
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcm47xx_sprom.h>
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ssb/ssb.h>
+
+static void create_key(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len)
+{
+ if (prefix && postfix)
+ snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+ else if (prefix)
+ snprintf(buf, len, "%s%s", prefix, name);
+ else if (postfix)
+ snprintf(buf, len, "%s%s", name, postfix);
+ else
+ snprintf(buf, len, "%s", name);
+}
+
+static int get_nvram_var(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len, bool fallback)
+{
+ char key[40];
+ int err;
+
+ create_key(prefix, postfix, name, key, sizeof(key));
+
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ if (fallback && err == -ENOENT && prefix) {
+ create_key(NULL, postfix, name, key, sizeof(key));
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ }
+ return err;
+}
+
+#define NVRAM_READ_VAL(type) \
+static void nvram_read_ ## type(const char *prefix, \
+ const char *postfix, const char *name, \
+ type *val, type allset, bool fallback) \
+{ \
+ char buf[100]; \
+ int err; \
+ type var; \
+ \
+ err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
+ fallback); \
+ if (err < 0) \
+ return; \
+ err = kstrto ## type(strim(buf), 0, &var); \
+ if (err) { \
+ pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
+ prefix, name, postfix, buf, err); \
+ return; \
+ } \
+ if (allset && var == allset) \
+ return; \
+ *val = var; \
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+ u16 *val_lo, u16 *val_hi, bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+ *val_lo = (val & 0x0000FFFFU);
+ *val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+ u8 *leddc_on_time, u8 *leddc_off_time,
+ bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+
+ if (val == 0xffff || val == 0xffffffff)
+ return;
+
+ *leddc_on_time = val & 0xff;
+ *leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+ u8 val[6], bool fallback)
+{
+ char buf[100];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+
+ strreplace(buf, '-', ':');
+ if (!mac_pton(buf, val))
+ pr_warn("Can not parse mac address: %s\n", buf);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+ char val[2], bool fallback)
+{
+ char buf[10];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ if (buf[0] == '0')
+ return;
+ if (strlen(buf) > 2) {
+ pr_warn("alpha2 is too long %s\n", buf);
+ return;
+ }
+ memcpy(val, buf, 2);
+}
+
+/* This is a one-function-only macro; it uses the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+ if (_revmask & BIT(sprom->revision)) \
+ nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+ _allset, _fallback)
+/*
+ * Special version of the filling function that can be safely called for any
+ * SPROM revision. For every NVRAM to SPROM mapping it contains a bitmask of
+ * revisions for which the mapping is valid.
+ * It obviously requires some hexadecimal/bitmasks knowledge, but allows
+ * writing cleaner code (easy revisions handling).
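+ * For example, a revision mask of 0x0000070e selects SPROM revisions 1, 2, 3
+ * and 8, 9, 10 (bits 1-3 and 8-10 set).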
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep numbering sane.
+ */
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ const char *pre = prefix;
+ bool fb = fallback;
+
+ /* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
+ ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
+
+ ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+ ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
+ ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
+ ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
+ ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+ ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+ ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+ ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+ ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+ ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+ ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+ ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+ ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+ ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+ ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+ ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+ ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+ ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+ ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+ ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+ ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+ ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+ ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+ ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+ ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u8(prefix, postfix, "maxp2ga",
+ &pwr_info->maxpwr_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt2ga",
+ &pwr_info->itssi_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt5ga",
+ &pwr_info->itssi_5g, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw0a",
+ &pwr_info->pa_2g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw1a",
+ &pwr_info->pa_2g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw2a",
+ &pwr_info->pa_2g[2], 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5ga",
+ &pwr_info->maxpwr_5g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gha",
+ &pwr_info->maxpwr_5gh, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gla",
+ &pwr_info->maxpwr_5gl, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw0a",
+ &pwr_info->pa_5g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw1a",
+ &pwr_info->pa_5g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw2a",
+ &pwr_info->pa_5g[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw0a",
+ &pwr_info->pa_5gl[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw1a",
+ &pwr_info->pa_5gl[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw2a",
+ &pwr_info->pa_5gl[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw0a",
+ &pwr_info->pa_5gh[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw1a",
+ &pwr_info->pa_5gh[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw2a",
+ &pwr_info->pa_5gh[2], 0, fallback);
+ }
+}
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u16(prefix, postfix, "pa2gw3a",
+ &pwr_info->pa_2g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw3a",
+ &pwr_info->pa_5g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw3a",
+ &pwr_info->pa_5gl[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw3a",
+ &pwr_info->pa_5gh[3], 0, fallback);
+ }
+}
+
+static bool bcm47xx_is_valid_mac(u8 *mac)
+{
+ return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
+}
+
+static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
+{
+ u8 *oui = mac + ETH_ALEN/2 - 1;
+ u8 *p = mac + ETH_ALEN - 1;
+
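+	/*
+	 * Add num to the low (non-OUI) bytes, propagating any carry;
+	 * bail out rather than modify the OUI.
+	 */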
+ do {
+ (*p) += num;
+ if (*p > num)
+ break;
+ p--;
+ num = 1;
+ } while (p != oui);
+
+ if (p == oui) {
+ pr_err("unable to fetch mac address\n");
+ return -ENOENT;
+ }
+ return 0;
+}
+
+static int mac_addr_used = 2;
+
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ bool fb = fallback;
+
+ nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
+ nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
+ nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
+ nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
+ nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
+
+ nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+ nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
+
+	/* The address prefix 00:90:4C is used by Broadcom in their initial
+	 * configuration. When a MAC address with the prefix 00:90:4C is used,
+	 * all devices from the same series share the same MAC address. To
+	 * prevent MAC address collisions we replace it with a MAC address
+	 * derived from the base (et0macaddr) address.
+	 */
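+	/*
+	 * Hypothetical example: a base et0macaddr of 00:11:22:33:44:55 with
+	 * mac_addr_used == 2 would yield an il0mac of 00:11:22:33:44:57.
+	 */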
+ if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
+ u8 mac[6];
+
+ nvram_read_macaddr(NULL, "et0macaddr", mac, false);
+ if (bcm47xx_is_valid_mac(mac)) {
+ int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
+
+ if (!err) {
+ ether_addr_copy(sprom->il0mac, mac);
+ mac_addr_used++;
+ }
+ }
+ }
+}
+
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi, fallback);
+ nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+ &sprom->boardflags2_hi, fallback);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+ bcm47xx_fill_board_data(sprom, prefix, fallback);
+
+ nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+
+ /* Entries requiring custom functions */
+ nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
+ if (sprom->revision >= 3)
+ nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+ &sprom->leddc_off_time, fallback);
+
+ switch (sprom->revision) {
+ case 4:
+ case 5:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
+ break;
+ case 8:
+ case 9:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ break;
+ }
+
+ bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
+}
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+
+ switch (bus->bustype) {
+ case SSB_BUSTYPE_SSB:
+ bcm47xx_fill_sprom(out, NULL, false);
+ return 0;
+ case SSB_BUSTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix, false);
+ return 0;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
+ * decided to introduce prefixes like 0: 1: 2: etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+ size_t prefix_len = strlen(prefix);
+ size_t short_len = prefix_len - 1;
+ char nvram_var[10];
+ char buf[20];
+ int i;
+
+ /* Passed prefix has to end with a slash */
+ if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
+ return;
+
+ for (i = 0; i < 3; i++) {
+ if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+ continue;
+ if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+ continue;
+ if (!strcmp(buf, prefix) ||
+ (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+ snprintf(prefix, prefix_size, "%d:", i);
+ return;
+ }
+ }
+}
+
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+ struct bcma_boardinfo *binfo = &bus->boardinfo;
+ struct bcma_device *core;
+ char buf[10];
+ char *prefix;
+ bool fallback = false;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ /* On BCM47XX all PCI buses share the same domain */
+ if (IS_ENABLED(CONFIG_BCM47XX))
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ else
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ pci_domain_nr(bus->host_pci->bus) + 1,
+ bus->host_pci->bus->number);
+ bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
+ prefix = buf;
+ break;
+ case BCMA_HOSTTYPE_SOC:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ core = bcma_find_core(bus, BCMA_CORE_80211);
+ if (core) {
+ snprintf(buf, sizeof(buf), "sb/%u/",
+ core->core_index);
+ prefix = buf;
+ fallback = true;
+ } else {
+ prefix = NULL;
+ }
+ break;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+
+ nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
+ if (!binfo->vendor)
+ binfo->vendor = SSB_BOARDVENDOR_BCM;
+ nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
+
+ bcm47xx_fill_sprom(out, prefix, fallback);
+
+ return 0;
+}
+#endif
+
+static unsigned int bcm47xx_sprom_registered;
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handler very early, so we
+ * use anything like platform device / driver for this.
+ */
+int bcm47xx_sprom_register_fallbacks(void)
+{
+ if (bcm47xx_sprom_registered)
+ return 0;
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+ if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+ pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+ if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+ pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+
+ bcm47xx_sprom_registered = 1;
+
+ return 0;
+}
+
+fs_initcall(bcm47xx_sprom_register_fallbacks);
diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c
new file mode 100644
index 000000000..a5bf4c3f6
--- /dev/null
+++ b/drivers/firmware/broadcom/tee_bnxt_fw.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#include <linux/firmware/broadcom/tee_bnxt_fw.h>
+
+#define MAX_SHM_MEM_SZ SZ_4M
+
+#define MAX_TEE_PARAM_ARRY_MEMB 4
+
+enum ta_cmd {
+ /*
+ * TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
+ *
+ * param[0] unused
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ *	TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found in memory
+ */
+ TA_CMD_BNXT_FASTBOOT = 0,
+
+ /*
+ * TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
+ *
+ * param[0] (inout memref) - Coredump buffer memory reference
+ * param[1] (in value) - value.a: offset from which data is to be copied
+ * value.b: size of data to be copied
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
+ */
+ TA_CMD_BNXT_COPY_COREDUMP = 3,
+};
+
+/**
+ * struct tee_bnxt_fw_private - OP-TEE bnxt private data
+ * @dev: OP-TEE based bnxt device.
+ * @ctx: OP-TEE context handler.
+ * @session_id:	TA session identifier.
+ * @fw_shm_pool:	Memory pool for firmware.
+ */
+struct tee_bnxt_fw_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ struct tee_shm *fw_shm_pool;
+};
+
+static struct tee_bnxt_fw_private pvt_data;
+
+static void prepare_args(int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = pvt_data.session_id;
+ arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
+
+ /* Fill invoke cmd params */
+ switch (cmd) {
+ case TA_CMD_BNXT_COPY_COREDUMP:
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data.fw_shm_pool;
+ param[0].u.memref.size = MAX_SHM_MEM_SZ;
+ param[0].u.memref.shm_offs = 0;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ break;
+ case TA_CMD_BNXT_FASTBOOT:
+ default:
+ /* Nothing to do */
+ break;
+ }
+}
+
+/**
+ * tee_bnxt_fw_load() - Load the bnxt firmware
+ *
+ * Uses an OP-TEE call to start a secure boot process.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_fw_load(void)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_fw_load);
+
+/**
+ * tee_bnxt_copy_coredump() - Copy coredump from the allocated memory
+ * @buf:    destination buffer where core dump is copied into
+ * @offset: offset from the base address of core dump area
+ * @size:   size of the dump
+ *
+ * Uses an OP-TEE call to copy the coredump into the destination buffer.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
+{
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
+ void *core_data;
+ u32 rbytes = size;
+ u32 nbytes = 0;
+ int ret = 0;
+
+ if (!pvt_data.ctx)
+ return -ENODEV;
+
+ prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
+
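+	/* Copy in chunks of at most the shared memory pool size (MAX_SHM_MEM_SZ) */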
+ while (rbytes) {
+ nbytes = min_t(u32, rbytes, param[0].u.memref.size);
+
+ /* Fill additional invoke cmd params */
+ param[1].u.value.a = offset;
+ param[1].u.value.b = nbytes;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
+ arg.ret, ret);
+ return -EINVAL;
+ }
+
+ core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
+ if (IS_ERR(core_data)) {
+ dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
+ return PTR_ERR(core_data);
+ }
+
+ memcpy(buf, core_data, nbytes);
+
+ rbytes -= nbytes;
+ buf += nbytes;
+ offset += nbytes;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tee_bnxt_copy_coredump);
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return (ver->impl_id == TEE_IMPL_ID_OPTEE);
+}
+
+static int tee_bnxt_fw_probe(struct device *dev)
+{
+ struct tee_client_device *bnxt_device = to_tee_client_device(dev);
+ int ret, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *fw_shm_pool;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with Bnxt load Trusted App */
+ memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if (ret < 0 || sess_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ pvt_data.dev = dev;
+
+ fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
+ if (IS_ERR(fw_shm_pool)) {
+ dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
+ err = PTR_ERR(fw_shm_pool);
+ goto out_sess;
+ }
+
+ pvt_data.fw_shm_pool = fw_shm_pool;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int tee_bnxt_fw_remove(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+
+ return 0;
+}
+
+static void tee_bnxt_fw_shutdown(struct device *dev)
+{
+ tee_shm_free(pvt_data.fw_shm_pool);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+ pvt_data.ctx = NULL;
+}
+
+static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
+ {UUID_INIT(0x6272636D, 0x2019, 0x0716,
+ 0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
+
+static struct tee_client_driver tee_bnxt_fw_driver = {
+ .id_table = tee_bnxt_fw_id_table,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .bus = &tee_bus_type,
+ .probe = tee_bnxt_fw_probe,
+ .remove = tee_bnxt_fw_remove,
+ .shutdown = tee_bnxt_fw_shutdown,
+ },
+};
+
+static int __init tee_bnxt_fw_mod_init(void)
+{
+ return driver_register(&tee_bnxt_fw_driver.driver);
+}
+
+static void __exit tee_bnxt_fw_mod_exit(void)
+{
+ driver_unregister(&tee_bnxt_fw_driver.driver);
+}
+
+module_init(tee_bnxt_fw_mod_init);
+module_exit(tee_bnxt_fw_mod_exit);
+
+MODULE_AUTHOR("Vikas Gupta <vikas.gupta@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
new file mode 100644
index 000000000..86d71b021
--- /dev/null
+++ b/drivers/firmware/dmi-id.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Export SMBIOS/DMI info via sysfs to userspace
+ *
+ * Copyright 2007, Lennart Poettering
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+struct dmi_device_attribute {
+ struct device_attribute dev_attr;
+ int field;
+};
+#define to_dmi_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+
+static ssize_t sys_dmi_field_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ int field = to_dmi_dev_attr(attr)->field;
+ ssize_t len;
+ len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
+ page[len-1] = '\n';
+ return len;
+}
+
+#define DMI_ATTR(_name, _mode, _show, _field) \
+ { .dev_attr = __ATTR(_name, _mode, _show, NULL), \
+ .field = _field }
+
+#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \
+static struct dmi_device_attribute sys_dmi_##_name##_attr = \
+ DMI_ATTR(_name, _mode, sys_dmi_field_show, _field);
+
+DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
+DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku, 0444, DMI_PRODUCT_SKU);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
+DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(board_serial, 0400, DMI_BOARD_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(board_asset_tag, 0444, DMI_BOARD_ASSET_TAG);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_vendor, 0444, DMI_CHASSIS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_type, 0444, DMI_CHASSIS_TYPE);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_version, 0444, DMI_CHASSIS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_serial, 0400, DMI_CHASSIS_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_asset_tag, 0444, DMI_CHASSIS_ASSET_TAG);
+
+static void ascii_filter(char *d, const char *s)
+{
+ /* Filter out characters we don't want to see in the modalias string */
+ for (; *s; s++)
+ if (*s > ' ' && *s < 127 && *s != ':')
+ *(d++) = *s;
+
+ *d = 0;
+}
+
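+/*
+ * Build the modalias string from the available DMI fields, e.g.
+ * (hypothetical values) "dmi:bvnAcme:bvr1.2.3:bd01/01/2020:svnAcme:pnWidget:...:".
+ */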
+static ssize_t get_modalias(char *buffer, size_t buffer_size)
+{
+ static const struct mafield {
+ const char *prefix;
+ int field;
+ } fields[] = {
+ { "bvn", DMI_BIOS_VENDOR },
+ { "bvr", DMI_BIOS_VERSION },
+ { "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
+ { "svn", DMI_SYS_VENDOR },
+ { "pn", DMI_PRODUCT_NAME },
+ { "pvr", DMI_PRODUCT_VERSION },
+ { "rvn", DMI_BOARD_VENDOR },
+ { "rn", DMI_BOARD_NAME },
+ { "rvr", DMI_BOARD_VERSION },
+ { "cvn", DMI_CHASSIS_VENDOR },
+ { "ct", DMI_CHASSIS_TYPE },
+ { "cvr", DMI_CHASSIS_VERSION },
+ { NULL, DMI_NONE }
+ };
+
+ ssize_t l, left;
+ char *p;
+ const struct mafield *f;
+
+ strcpy(buffer, "dmi");
+	p = buffer + 3;
+	left = buffer_size - 4;
+
+ for (f = fields; f->prefix && left > 0; f++) {
+ const char *c;
+ char *t;
+
+ c = dmi_get_system_info(f->field);
+ if (!c)
+ continue;
+
+ t = kmalloc(strlen(c) + 1, GFP_KERNEL);
+ if (!t)
+ break;
+ ascii_filter(t, c);
+ l = scnprintf(p, left, ":%s%s", f->prefix, t);
+ kfree(t);
+
+ p += l;
+ left -= l;
+ }
+
+ p[0] = ':';
+ p[1] = 0;
+
+ return p - buffer + 1;
+}
+
+static ssize_t sys_dmi_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ ssize_t r;
+ r = get_modalias(page, PAGE_SIZE-1);
+ page[r] = '\n';
+ page[r+1] = 0;
+ return r+1;
+}
+
+static struct device_attribute sys_dmi_modalias_attr =
+ __ATTR(modalias, 0444, sys_dmi_modalias_show, NULL);
+
+static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
+
+static struct attribute_group sys_dmi_attribute_group = {
+ .attrs = sys_dmi_attributes,
+};
+
+static const struct attribute_group *sys_dmi_attribute_groups[] = {
+ &sys_dmi_attribute_group,
+ NULL
+};
+
+static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ ssize_t len;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
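+	/* Append the value right after "MODALIAS=", overwriting its trailing NUL */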
+ len = get_modalias(&env->buf[env->buflen - 1],
+ sizeof(env->buf) - env->buflen);
+ if (len >= (sizeof(env->buf) - env->buflen))
+ return -ENOMEM;
+ env->buflen += len;
+ return 0;
+}
+
+static struct class dmi_class = {
+ .name = "dmi",
+ .dev_release = (void(*)(struct device *)) kfree,
+ .dev_uevent = dmi_dev_uevent,
+};
+
+static struct device *dmi_dev;
+
+/* Initialization */
+
+#define ADD_DMI_ATTR(_name, _field) \
+ if (dmi_get_system_info(_field)) \
+ sys_dmi_attributes[i++] = &sys_dmi_##_name##_attr.dev_attr.attr;
+
+/* In a separate function to keep gcc 3.2 happy - do NOT merge this in
+ dmi_id_init! */
+static void __init dmi_id_init_attr_table(void)
+{
+ int i;
+
+ /* Not necessarily all DMI fields are available on all
+	 * systems, hence let's build an attribute table of just
+ * what's available */
+ i = 0;
+ ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
+ ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
+ ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
+ ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
+ ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
+ ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
+ ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
+ ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
+ ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
+ ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
+ ADD_DMI_ATTR(product_sku, DMI_PRODUCT_SKU);
+ ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
+ ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
+ ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+ ADD_DMI_ATTR(board_serial, DMI_BOARD_SERIAL);
+ ADD_DMI_ATTR(board_asset_tag, DMI_BOARD_ASSET_TAG);
+ ADD_DMI_ATTR(chassis_vendor, DMI_CHASSIS_VENDOR);
+ ADD_DMI_ATTR(chassis_type, DMI_CHASSIS_TYPE);
+ ADD_DMI_ATTR(chassis_version, DMI_CHASSIS_VERSION);
+ ADD_DMI_ATTR(chassis_serial, DMI_CHASSIS_SERIAL);
+ ADD_DMI_ATTR(chassis_asset_tag, DMI_CHASSIS_ASSET_TAG);
+ sys_dmi_attributes[i++] = &sys_dmi_modalias_attr.attr;
+}
+
+static int __init dmi_id_init(void)
+{
+ int ret;
+
+ if (!dmi_available)
+ return -ENODEV;
+
+ dmi_id_init_attr_table();
+
+ ret = class_register(&dmi_class);
+ if (ret)
+ return ret;
+
+ dmi_dev = kzalloc(sizeof(*dmi_dev), GFP_KERNEL);
+ if (!dmi_dev) {
+ ret = -ENOMEM;
+ goto fail_class_unregister;
+ }
+
+ dmi_dev->class = &dmi_class;
+ dev_set_name(dmi_dev, "id");
+ dmi_dev->groups = sys_dmi_attribute_groups;
+
+ ret = device_register(dmi_dev);
+ if (ret)
+ goto fail_put_dmi_dev;
+
+ return 0;
+
+fail_put_dmi_dev:
+ put_device(dmi_dev);
+
+fail_class_unregister:
+ class_unregister(&dmi_class);
+
+ return ret;
+}
+
+arch_initcall(dmi_id_init);
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
new file mode 100644
index 000000000..4a93fb490
--- /dev/null
+++ b/drivers/firmware/dmi-sysfs.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dmi-sysfs.c
+ *
+ * This module exports the DMI tables read-only to userspace through the
+ * sysfs file system.
+ *
+ * Data is currently found below
+ * /sys/firmware/dmi/...
+ *
+ * DMI attributes are presented in attribute files with names
+ * formatted using %d-%d, so that the first integer indicates the
+ * structure type (0-255), and the second field is the instance of that
+ * entry.
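+ * For example, 15-0 names the first instance of a Type 15 (System Event
+ * Log) entry.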
+ *
+ * Copyright 2011 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kobject.h>
+#include <linux/dmi.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <asm/dmi.h>
+
+#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but the entry
+				type field is only 8 bits wide */
+
+struct dmi_sysfs_entry {
+ struct dmi_header dh;
+ struct kobject kobj;
+ int instance;
+ int position;
+ struct list_head list;
+ struct kobject *child;
+};
+
+/*
+ * Global list of dmi_sysfs_entry. Even though this should only be
+ * manipulated at setup and teardown, the lazy nature of the kobject
+ * system means we get lazy removes.
+ */
+static LIST_HEAD(entry_list);
+static DEFINE_SPINLOCK(entry_list_lock);
+
+/* dmi_sysfs_attribute - Top level attribute. Used by all entries. */
+struct dmi_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
+};
+
+#define DMI_SYSFS_ATTR(_entry, _name) \
+struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0400}, \
+ .show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*
+ * dmi_sysfs_mapped_attribute - Attribute where we require the entry be
+ * mapped in. Use in conjunction with dmi_sysfs_specialize_attr_ops.
+ */
+struct dmi_sysfs_mapped_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ char *buf);
+};
+
+#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
+struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0400}, \
+ .show = dmi_sysfs_##_entry##_##_name, \
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+static void dmi_entry_free(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct dmi_sysfs_entry, kobj);
+}
+
+static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct dmi_sysfs_attribute, attr);
+}
+
+static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *_attr, char *buf)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+ struct dmi_sysfs_attribute *attr = to_attr(_attr);
+
+ /* DMI stuff is only ever admin visible */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops dmi_sysfs_attr_ops = {
+ .show = dmi_sysfs_attr_show,
+};
+
+typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
+ const struct dmi_header *dh, void *);
+
+struct find_dmi_data {
+ struct dmi_sysfs_entry *entry;
+ dmi_callback callback;
+ void *private;
+ int instance_countdown;
+ ssize_t ret;
+};
+
+static void find_dmi_entry_helper(const struct dmi_header *dh,
+ void *_data)
+{
+ struct find_dmi_data *data = _data;
+ struct dmi_sysfs_entry *entry = data->entry;
+
+ /* Is this the entry we want? */
+ if (dh->type != entry->dh.type)
+ return;
+
+ if (data->instance_countdown != 0) {
+ /* try the next instance? */
+ data->instance_countdown--;
+ return;
+ }
+
+ /*
+ * Don't ever revisit the instance. Short circuit later
+ * instances by letting the instance_countdown run negative
+ */
+ data->instance_countdown--;
+
+ /* Found the entry */
+ data->ret = data->callback(entry, dh, data->private);
+}
+
+/* State for passing the read parameters through dmi_find_entry() */
+struct dmi_read_state {
+ char *buf;
+ loff_t pos;
+ size_t count;
+};
+
+static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
+ dmi_callback callback, void *private)
+{
+ struct find_dmi_data data = {
+ .entry = entry,
+ .callback = callback,
+ .private = private,
+ .instance_countdown = entry->instance,
+ .ret = -EIO, /* To signal the entry disappeared */
+ };
+ int ret;
+
+ ret = dmi_walk(find_dmi_entry_helper, &data);
+ /* This shouldn't happen, but just in case. */
+ if (ret)
+ return -EINVAL;
+ return data.ret;
+}
+
+/*
+ * Calculate and return the byte length of the dmi entry identified by
+ * dh. This includes both the formatted portion as well as the
+ * unformatted string space, including the two trailing nul characters.
+ */
+static size_t dmi_entry_length(const struct dmi_header *dh)
+{
+ const char *p = (const char *)dh;
+
+ p += dh->length;
+
+ while (p[0] || p[1])
+ p++;
+
+ return 2 + p - (const char *)dh;
+}
+
+/*************************************************
+ * Support bits for specialized DMI entry support
+ *************************************************/
+struct dmi_entry_attr_show_data {
+ struct attribute *attr;
+ char *buf;
+};
+
+static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_data)
+{
+ struct dmi_entry_attr_show_data *data = _data;
+ struct dmi_sysfs_mapped_attribute *attr;
+
+ attr = container_of(data->attr,
+ struct dmi_sysfs_mapped_attribute, attr);
+ return attr->show(entry, dh, data->buf);
+}
+
+static ssize_t dmi_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct dmi_entry_attr_show_data data = {
+ .attr = attr,
+ .buf = buf,
+ };
+ /* Find the entry according to our parent and call the
+ * normalized show method hanging off of the attribute */
+ return find_dmi_entry(to_entry(kobj->parent),
+ dmi_entry_attr_show_helper, &data);
+}
+
+static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
+ .show = dmi_entry_attr_show,
+};
+
+/*************************************************
+ * Specialized DMI entry support.
+ *************************************************/
+
+/*** Type 15 - System Event Table ***/
+
+#define DMI_SEL_ACCESS_METHOD_IO8 0x00
+#define DMI_SEL_ACCESS_METHOD_IO2x8 0x01
+#define DMI_SEL_ACCESS_METHOD_IO16 0x02
+#define DMI_SEL_ACCESS_METHOD_PHYS32 0x03
+#define DMI_SEL_ACCESS_METHOD_GPNV 0x04
+
+struct dmi_system_event_log {
+ struct dmi_header header;
+ u16 area_length;
+ u16 header_start_offset;
+ u16 data_start_offset;
+ u8 access_method;
+ u8 status;
+ u32 change_token;
+ union {
+ struct {
+ u16 index_addr;
+ u16 data_addr;
+ } io;
+ u32 phys_addr32;
+ u16 gpnv_handle;
+ u32 access_method_address;
+ };
+ u8 header_format;
+ u8 type_descriptors_supported_count;
+ u8 per_log_type_descriptor_length;
+ u8 supported_log_type_descriptos[];
+} __packed;
+
+#define DMI_SYSFS_SEL_FIELD(_field) \
+static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
+ const struct dmi_header *dh, \
+ char *buf) \
+{ \
+ struct dmi_system_event_log sel; \
+ if (sizeof(sel) > dmi_entry_length(dh)) \
+ return -EIO; \
+ memcpy(&sel, dh, sizeof(sel)); \
+ return sprintf(buf, "%u\n", sel._field); \
+} \
+static DMI_SYSFS_MAPPED_ATTR(sel, _field)
+
+DMI_SYSFS_SEL_FIELD(area_length);
+DMI_SYSFS_SEL_FIELD(header_start_offset);
+DMI_SYSFS_SEL_FIELD(data_start_offset);
+DMI_SYSFS_SEL_FIELD(access_method);
+DMI_SYSFS_SEL_FIELD(status);
+DMI_SYSFS_SEL_FIELD(change_token);
+DMI_SYSFS_SEL_FIELD(access_method_address);
+DMI_SYSFS_SEL_FIELD(header_format);
+DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
+DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
+
+static struct attribute *dmi_sysfs_sel_attrs[] = {
+ &dmi_sysfs_attr_sel_area_length.attr,
+ &dmi_sysfs_attr_sel_header_start_offset.attr,
+ &dmi_sysfs_attr_sel_data_start_offset.attr,
+ &dmi_sysfs_attr_sel_access_method.attr,
+ &dmi_sysfs_attr_sel_status.attr,
+ &dmi_sysfs_attr_sel_change_token.attr,
+ &dmi_sysfs_attr_sel_access_method_address.attr,
+ &dmi_sysfs_attr_sel_header_format.attr,
+ &dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
+ &dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
+ NULL,
+};
+
+
+static struct kobj_type dmi_system_event_log_ktype = {
+ .release = dmi_entry_free,
+ .sysfs_ops = &dmi_sysfs_specialize_attr_ops,
+ .default_attrs = dmi_sysfs_sel_attrs,
+};
+
+typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
+ loff_t offset);
+
+static DEFINE_MUTEX(io_port_lock);
+
+static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outb((u8)offset, sel->io.index_addr);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outb((u8)offset, sel->io.index_addr);
+ outb((u8)(offset >> 8), sel->io.index_addr + 1);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
+ loff_t offset)
+{
+ u8 ret;
+
+ mutex_lock(&io_port_lock);
+ outw((u16)offset, sel->io.index_addr);
+ ret = inb(sel->io.data_addr);
+ mutex_unlock(&io_port_lock);
+ return ret;
+}
+
+static sel_io_reader sel_io_readers[] = {
+ [DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io,
+ [DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io,
+ [DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io,
+};
+
+static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
+ const struct dmi_system_event_log *sel,
+ char *buf, loff_t pos, size_t count)
+{
+ ssize_t wrote = 0;
+
+ sel_io_reader io_reader = sel_io_readers[sel->access_method];
+
+ while (count && pos < sel->area_length) {
+ count--;
+ *(buf++) = io_reader(sel, pos++);
+ wrote++;
+ }
+
+ return wrote;
+}
+
+static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
+ const struct dmi_system_event_log *sel,
+ char *buf, loff_t pos, size_t count)
+{
+ u8 __iomem *mapped;
+ ssize_t wrote = 0;
+
+ mapped = dmi_remap(sel->access_method_address, sel->area_length);
+ if (!mapped)
+ return -EIO;
+
+ while (count && pos < sel->area_length) {
+ count--;
+ *(buf++) = readb(mapped + pos++);
+ wrote++;
+ }
+
+ dmi_unmap(mapped);
+ return wrote;
+}
+
+static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_state)
+{
+ struct dmi_read_state *state = _state;
+ struct dmi_system_event_log sel;
+
+ if (sizeof(sel) > dmi_entry_length(dh))
+ return -EIO;
+
+ memcpy(&sel, dh, sizeof(sel));
+
+ switch (sel.access_method) {
+ case DMI_SEL_ACCESS_METHOD_IO8:
+ case DMI_SEL_ACCESS_METHOD_IO2x8:
+ case DMI_SEL_ACCESS_METHOD_IO16:
+ return dmi_sel_raw_read_io(entry, &sel, state->buf,
+ state->pos, state->count);
+ case DMI_SEL_ACCESS_METHOD_PHYS32:
+ return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
+ state->pos, state->count);
+ case DMI_SEL_ACCESS_METHOD_GPNV:
+ pr_info("dmi-sysfs: GPNV support missing.\n");
+ return -EIO;
+ default:
+ pr_info("dmi-sysfs: Unknown access method %02x\n",
+ sel.access_method);
+ return -EIO;
+ }
+}
+
+static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
+ struct dmi_read_state state = {
+ .buf = buf,
+ .pos = pos,
+ .count = count,
+ };
+
+ return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
+}
+
+static struct bin_attribute dmi_sel_raw_attr = {
+ .attr = {.name = "raw_event_log", .mode = 0400},
+ .read = dmi_sel_raw_read,
+};
+
+static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
+{
+ int ret;
+
+ entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
+ if (!entry->child)
+ return -ENOMEM;
+ ret = kobject_init_and_add(entry->child,
+ &dmi_system_event_log_ktype,
+ &entry->kobj,
+ "system_event_log");
+ if (ret)
+ goto out_free;
+
+ ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+ if (ret)
+ goto out_del;
+
+ return 0;
+
+out_del:
+ kobject_del(entry->child);
+out_free:
+ kfree(entry->child);
+ return ret;
+}
+
+/*************************************************
+ * Generic DMI entry support.
+ *************************************************/
+
+static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.length);
+}
+
+static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.handle);
+}
+
+static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
+{
+ return sprintf(buf, "%d\n", entry->dh.type);
+}
+
+static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", entry->instance);
+}
+
+static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", entry->position);
+}
+
+static DMI_SYSFS_ATTR(entry, length);
+static DMI_SYSFS_ATTR(entry, handle);
+static DMI_SYSFS_ATTR(entry, type);
+static DMI_SYSFS_ATTR(entry, instance);
+static DMI_SYSFS_ATTR(entry, position);
+
+static struct attribute *dmi_sysfs_entry_attrs[] = {
+ &dmi_sysfs_attr_entry_length.attr,
+ &dmi_sysfs_attr_entry_handle.attr,
+ &dmi_sysfs_attr_entry_type.attr,
+ &dmi_sysfs_attr_entry_instance.attr,
+ &dmi_sysfs_attr_entry_position.attr,
+ NULL,
+};
+
+static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
+ const struct dmi_header *dh,
+ void *_state)
+{
+ struct dmi_read_state *state = _state;
+ size_t entry_length;
+
+ entry_length = dmi_entry_length(dh);
+
+ return memory_read_from_buffer(state->buf, state->count,
+ &state->pos, dh, entry_length);
+}
+
+static ssize_t dmi_entry_raw_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+ struct dmi_read_state state = {
+ .buf = buf,
+ .pos = pos,
+ .count = count,
+ };
+
+ return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
+}
+
+static const struct bin_attribute dmi_entry_raw_attr = {
+ .attr = {.name = "raw", .mode = 0400},
+ .read = dmi_entry_raw_read,
+};
+
+static void dmi_sysfs_entry_release(struct kobject *kobj)
+{
+ struct dmi_sysfs_entry *entry = to_entry(kobj);
+
+ spin_lock(&entry_list_lock);
+ list_del(&entry->list);
+ spin_unlock(&entry_list_lock);
+ kfree(entry);
+}
+
+static struct kobj_type dmi_sysfs_entry_ktype = {
+ .release = dmi_sysfs_entry_release,
+ .sysfs_ops = &dmi_sysfs_attr_ops,
+ .default_attrs = dmi_sysfs_entry_attrs,
+};
+
+static struct kset *dmi_kset;
+
+/* Global count of all instances seen. Only for setup */
+static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
+
+/* Global positional count of all entries seen. Only for setup */
+static int __initdata position_count;
+
+static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
+ void *_ret)
+{
+ struct dmi_sysfs_entry *entry;
+ int *ret = _ret;
+
+ /* If a previous entry saw an error, short circuit */
+ if (*ret)
+ return;
+
+ /* Allocate and register a new entry into the entries set */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ *ret = -ENOMEM;
+ return;
+ }
+
+ /* Set the key */
+ memcpy(&entry->dh, dh, sizeof(*dh));
+ entry->instance = instance_counts[dh->type]++;
+ entry->position = position_count++;
+
+ entry->kobj.kset = dmi_kset;
+ *ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
+ "%d-%d", dh->type, entry->instance);
+
+ if (*ret) {
+ kobject_put(&entry->kobj);
+ return;
+ }
+
+ /* Thread on the global list for cleanup */
+ spin_lock(&entry_list_lock);
+ list_add_tail(&entry->list, &entry_list);
+ spin_unlock(&entry_list_lock);
+
+ /* Handle specializations by type */
+ switch (dh->type) {
+ case DMI_ENTRY_SYSTEM_EVENT_LOG:
+ *ret = dmi_system_event_log(entry);
+ break;
+ default:
+ /* No specialization */
+ break;
+ }
+ if (*ret)
+ goto out_err;
+
+ /* Create the raw binary file to access the entry */
+ *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+ if (*ret)
+ goto out_err;
+
+ return;
+out_err:
+ kobject_put(entry->child);
+ kobject_put(&entry->kobj);
+ return;
+}
+
+static void cleanup_entry_list(void)
+{
+ struct dmi_sysfs_entry *entry, *next;
+
+ /* No locks, we are on our way out */
+ list_for_each_entry_safe(entry, next, &entry_list, list) {
+ kobject_put(entry->child);
+ kobject_put(&entry->kobj);
+ }
+}
+
+static int __init dmi_sysfs_init(void)
+{
+ int error;
+ int val;
+
+ if (!dmi_kobj) {
+ pr_debug("dmi-sysfs: dmi entry is absent.\n");
+ error = -ENODATA;
+ goto err;
+ }
+
+ dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
+ if (!dmi_kset) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ val = 0;
+ error = dmi_walk(dmi_sysfs_register_handle, &val);
+ if (error)
+ goto err;
+ if (val) {
+ error = val;
+ goto err;
+ }
+
+ pr_debug("dmi-sysfs: loaded.\n");
+
+ return 0;
+err:
+ cleanup_entry_list();
+ kset_unregister(dmi_kset);
+ return error;
+}
+
+/* clean up everything. */
+static void __exit dmi_sysfs_exit(void)
+{
+ pr_debug("dmi-sysfs: unloading.\n");
+ cleanup_entry_list();
+ kset_unregister(dmi_kset);
+}
+
+module_init(dmi_sysfs_init);
+module_exit(dmi_sysfs_exit);
+
+MODULE_AUTHOR("Mike Waychison <mikew@google.com>");
+MODULE_DESCRIPTION("DMI sysfs support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
new file mode 100644
index 000000000..d51ca0428
--- /dev/null
+++ b/drivers/firmware/dmi_scan.c
@@ -0,0 +1,1203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/random.h>
+#include <asm/dmi.h>
+#include <asm/unaligned.h>
+
+#ifndef SMBIOS_ENTRY_POINT_SCAN_START
+#define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000
+#endif
+
+struct kobject *dmi_kobj;
+EXPORT_SYMBOL_GPL(dmi_kobj);
+
+/*
+ * DMI stands for "Desktop Management Interface". It is part
+ * of, and an antecedent to, SMBIOS, which stands for System
+ * Management BIOS. See further: https://www.dmtf.org/standards
+ */
+static const char dmi_empty_string[] = "";
+
+static u32 dmi_ver __initdata;
+static u32 dmi_len;
+static u16 dmi_num;
+static u8 smbios_entry_point[32];
+static int smbios_entry_point_size;
+
+/* DMI system identification string used during boot */
+static char dmi_ids_string[128] __initdata;
+
+static struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u64 size; /* bytes */
+ u16 handle;
+ u8 type; /* DDR2, DDR3, DDR4 etc */
+} *dmi_memdev;
+static int dmi_memdev_nr;
+
+static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
+{
+ const u8 *bp = ((u8 *) dm) + dm->length;
+ const u8 *nsp;
+
+ if (s) {
+ while (--s > 0 && *bp)
+ bp += strlen(bp) + 1;
+
+ /* Strings containing only spaces are considered empty */
+ nsp = bp;
+ while (*nsp == ' ')
+ nsp++;
+ if (*nsp != '\0')
+ return bp;
+ }
+
+ return dmi_empty_string;
+}
+
+static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
+{
+ const char *bp = dmi_string_nosave(dm, s);
+ char *str;
+ size_t len;
+
+ if (bp == dmi_empty_string)
+ return dmi_empty_string;
+
+ len = strlen(bp) + 1;
+ str = dmi_alloc(len);
+ if (str != NULL)
+ strcpy(str, bp);
+
+ return str;
+}
+
+/*
+ * We have to be cautious here. We have seen BIOSes with DMI pointers
+ * pointing to completely the wrong place, for example.
+ */
+static void dmi_decode_table(u8 *buf,
+ void (*decode)(const struct dmi_header *, void *),
+ void *private_data)
+{
+ u8 *data = buf;
+ int i = 0;
+
+ /*
+ * Stop when we have seen all the items the table claimed to have
+ * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
+ * >= 3.0 only) OR we run off the end of the table (should never
+ * happen but sometimes does on bogus implementations.)
+ */
+ while ((!dmi_num || i < dmi_num) &&
+ (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+ const struct dmi_header *dm = (const struct dmi_header *)data;
+
+ /*
+ * We want to know the total length (formatted area and
+ * strings) before decoding to make sure we won't run off the
+ * table in dmi_decode or dmi_string
+ */
+ data += dm->length;
+ while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
+ data++;
+ if (data - buf < dmi_len - 1)
+ decode(dm, private_data);
+
+ data += 2;
+ i++;
+
+ /*
+ * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+ * For tables behind a 64-bit entry point, we have no item
+ * count and no exact table length, so stop on end-of-table
+ * marker. For tables behind a 32-bit entry point, we have
+ * seen OEM structures behind the end-of-table marker on
+ * some systems, so don't trust it.
+ */
+ if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
+ break;
+ }
+
+ /* Trim DMI table length if needed */
+ if (dmi_len > data - buf)
+ dmi_len = data - buf;
+}
+
+static phys_addr_t dmi_base;
+
+static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+ void *))
+{
+ u8 *buf;
+ u32 orig_dmi_len = dmi_len;
+
+ buf = dmi_early_remap(dmi_base, orig_dmi_len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ dmi_decode_table(buf, decode, NULL);
+
+ add_device_randomness(buf, dmi_len);
+
+ dmi_early_unmap(buf, orig_dmi_len);
+ return 0;
+}
+
+static int __init dmi_checksum(const u8 *buf, u8 len)
+{
+ u8 sum = 0;
+ int a;
+
+ for (a = 0; a < len; a++)
+ sum += buf[a];
+
+ return sum == 0;
+}
+
+static const char *dmi_ident[DMI_STRING_MAX];
+static LIST_HEAD(dmi_devices);
+int dmi_available;
+
+/*
+ * Save a DMI string
+ */
+static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
+ int string)
+{
+ const char *d = (const char *) dm;
+ const char *p;
+
+ if (dmi_ident[slot] || dm->length <= string)
+ return;
+
+ p = dmi_string(dm, d[string]);
+ if (p == NULL)
+ return;
+
+ dmi_ident[slot] = p;
+}
+
+static void __init dmi_save_release(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *minor, *major;
+ char *s;
+
+ /* If the table doesn't have the field, let's return */
+ if (dmi_ident[slot] || dm->length < index)
+ return;
+
+ minor = (u8 *) dm + index;
+ major = (u8 *) dm + index - 1;
+
+ /* As per the spec, if the system doesn't support this field,
+ * the value is FF
+ */
+ if (*major == 0xFF && *minor == 0xFF)
+ return;
+
+ s = dmi_alloc(8);
+ if (!s)
+ return;
+
+ sprintf(s, "%u.%u", *major, *minor);
+
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *d;
+ char *s;
+ int is_ff = 1, is_00 = 1, i;
+
+ if (dmi_ident[slot] || dm->length < index + 16)
+ return;
+
+ d = (u8 *) dm + index;
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+ if (d[i] != 0x00)
+ is_00 = 0;
+ if (d[i] != 0xFF)
+ is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+ return;
+
+ s = dmi_alloc(16*2+4+1);
+ if (!s)
+ return;
+
+ /*
+ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+ * the UUID are supposed to be little-endian encoded. The specification
+	 * says that this is the de facto standard.
+ */
+ if (dmi_ver >= 0x020600)
+ sprintf(s, "%pUl", d);
+ else
+ sprintf(s, "%pUb", d);
+
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_type(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *d;
+ char *s;
+
+ if (dmi_ident[slot] || dm->length <= index)
+ return;
+
+ s = dmi_alloc(4);
+ if (!s)
+ return;
+
+ d = (u8 *) dm + index;
+ sprintf(s, "%u", *d & 0x7F);
+ dmi_ident[slot] = s;
+}
+
+static void __init dmi_save_one_device(int type, const char *name)
+{
+ struct dmi_device *dev;
+
+ /* No duplicate device */
+ if (dmi_find_device(type, name, NULL))
+ return;
+
+ dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+ if (!dev)
+ return;
+
+ dev->type = type;
+ strcpy((char *)(dev + 1), name);
+ dev->name = (char *)(dev + 1);
+ dev->device_data = NULL;
+ list_add(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_devices(const struct dmi_header *dm)
+{
+ int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
+
+ for (i = 0; i < count; i++) {
+ const char *d = (char *)(dm + 1) + (i * 2);
+
+ /* Skip disabled device */
+ if ((*d & 0x80) == 0)
+ continue;
+
+ dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
+ }
+}
+
+static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
+{
+ int i, count;
+ struct dmi_device *dev;
+
+ if (dm->length < 0x05)
+ return;
+
+ count = *(u8 *)(dm + 1);
+ for (i = 1; i <= count; i++) {
+ const char *devname = dmi_string(dm, i);
+
+ if (devname == dmi_empty_string)
+ continue;
+
+ dev = dmi_alloc(sizeof(*dev));
+ if (!dev)
+ break;
+
+ dev->type = DMI_DEV_TYPE_OEM_STRING;
+ dev->name = devname;
+ dev->device_data = NULL;
+
+ list_add(&dev->list, &dmi_devices);
+ }
+}
+
+static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
+{
+ struct dmi_device *dev;
+ void *data;
+
+ data = dmi_alloc(dm->length);
+ if (data == NULL)
+ return;
+
+ memcpy(data, dm, dm->length);
+
+ dev = dmi_alloc(sizeof(*dev));
+ if (!dev)
+ return;
+
+ dev->type = DMI_DEV_TYPE_IPMI;
+ dev->name = "IPMI controller";
+ dev->device_data = data;
+
+ list_add_tail(&dev->list, &dmi_devices);
+}
+
+static void __init dmi_save_dev_pciaddr(int instance, int segment, int bus,
+ int devfn, const char *name, int type)
+{
+ struct dmi_dev_onboard *dev;
+
+ /* Ignore invalid values */
+ if (type == DMI_DEV_TYPE_DEV_SLOT &&
+ segment == 0xFFFF && bus == 0xFF && devfn == 0xFF)
+ return;
+
+ dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
+ if (!dev)
+ return;
+
+ dev->instance = instance;
+ dev->segment = segment;
+ dev->bus = bus;
+ dev->devfn = devfn;
+
+ strcpy((char *)&dev[1], name);
+ dev->dev.type = type;
+ dev->dev.name = (char *)&dev[1];
+ dev->dev.device_data = dev;
+
+ list_add(&dev->dev.list, &dmi_devices);
+}
+
+static void __init dmi_save_extended_devices(const struct dmi_header *dm)
+{
+ const char *name;
+ const u8 *d = (u8 *)dm;
+
+ if (dm->length < 0x0B)
+ return;
+
+ /* Skip disabled device */
+ if ((d[0x5] & 0x80) == 0)
+ return;
+
+ name = dmi_string_nosave(dm, d[0x4]);
+ dmi_save_dev_pciaddr(d[0x6], *(u16 *)(d + 0x7), d[0x9], d[0xA], name,
+ DMI_DEV_TYPE_DEV_ONBOARD);
+ dmi_save_one_device(d[0x5] & 0x7f, name);
+}
+
+static void __init dmi_save_system_slot(const struct dmi_header *dm)
+{
+ const u8 *d = (u8 *)dm;
+
+ /* Need SMBIOS 2.6+ structure */
+ if (dm->length < 0x11)
+ return;
+ dmi_save_dev_pciaddr(*(u16 *)(d + 0x9), *(u16 *)(d + 0xD), d[0xF],
+ d[0x10], dmi_string_nosave(dm, d[0x4]),
+ DMI_DEV_TYPE_DEV_SLOT);
+}
+
+static void __init count_mem_devices(const struct dmi_header *dm, void *v)
+{
+ if (dm->type != DMI_ENTRY_MEM_DEVICE)
+ return;
+ dmi_memdev_nr++;
+}
+
+static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+{
+ const char *d = (const char *)dm;
+ static int nr;
+ u64 bytes;
+ u16 size;
+
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
+ return;
+ if (nr >= dmi_memdev_nr) {
+ pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
+ return;
+ }
+ dmi_memdev[nr].handle = get_unaligned(&dm->handle);
+ dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
+ dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ dmi_memdev[nr].type = d[0x12];
+
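+	/*
+	 * Size field: 0 = not installed, 0xffff = unknown, bit 15 set =
+	 * value in KiB; 0x7fff means use the 32-bit extended size at
+	 * offset 0x1C (in MiB).
+	 */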
+ size = get_unaligned((u16 *)&d[0xC]);
+ if (size == 0)
+ bytes = 0;
+ else if (size == 0xffff)
+ bytes = ~0ull;
+ else if (size & 0x8000)
+ bytes = (u64)(size & 0x7fff) << 10;
+ else if (size != 0x7fff || dm->length < 0x20)
+ bytes = (u64)size << 20;
+ else
+ bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
+
+ dmi_memdev[nr].size = bytes;
+ nr++;
+}
+
+static void __init dmi_memdev_walk(void)
+{
+ if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
+ dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
+ if (dmi_memdev)
+ dmi_walk_early(save_mem_devices);
+ }
+}
+
+/*
+ * Process a DMI table entry. Right now all we care about are the BIOS
+ * and machine entries. For 2.5 we should pull the smbus controller info
+ * out of here.
+ */
+static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0: /* BIOS Information */
+ dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
+ dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
+ dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
+ dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
+ break;
+ case 1: /* System Information */
+ dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
+ dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
+ dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
+ dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
+ dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+ dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
+ dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
+ break;
+ case 2: /* Base Board Information */
+ dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
+ dmi_save_ident(dm, DMI_BOARD_NAME, 5);
+ dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+ dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
+ dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
+ break;
+ case 3: /* Chassis Information */
+ dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
+ dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
+ dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
+ dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
+ dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
+ break;
+ case 9: /* System Slots */
+ dmi_save_system_slot(dm);
+ break;
+ case 10: /* Onboard Devices Information */
+ dmi_save_devices(dm);
+ break;
+ case 11: /* OEM Strings */
+ dmi_save_oem_strings_devices(dm);
+ break;
+ case 38: /* IPMI Device Information */
+ dmi_save_ipmi_device(dm);
+ break;
+ case 41: /* Onboard Devices Extended Information */
+ dmi_save_extended_devices(dm);
+ }
+}
+
+static int __init print_filtered(char *buf, size_t len, const char *info)
+{
+ int c = 0;
+ const char *p;
+
+ if (!info)
+ return c;
+
+ for (p = info; *p; p++)
+ if (isprint(*p))
+ c += scnprintf(buf + c, len - c, "%c", *p);
+ else
+ c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
+ return c;
+}
+
+static void __init dmi_format_ids(char *buf, size_t len)
+{
+ int c = 0;
+ const char *board; /* Board Name is optional */
+
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_SYS_VENDOR));
+ c += scnprintf(buf + c, len - c, " ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_PRODUCT_NAME));
+
+ board = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board) {
+ c += scnprintf(buf + c, len - c, "/");
+ c += print_filtered(buf + c, len - c, board);
+ }
+ c += scnprintf(buf + c, len - c, ", BIOS ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_BIOS_VERSION));
+ c += scnprintf(buf + c, len - c, " ");
+ c += print_filtered(buf + c, len - c,
+ dmi_get_system_info(DMI_BIOS_DATE));
+}
+
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image. Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0. Otherwise return 1.
+ */
+static int __init dmi_present(const u8 *buf)
+{
+ u32 smbios_ver;
+
+ if (memcmp(buf, "_SM_", 4) == 0 &&
+ buf[5] < 32 && dmi_checksum(buf, buf[5])) {
+ smbios_ver = get_unaligned_be16(buf + 6);
+ smbios_entry_point_size = buf[5];
+ memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+ /* Some BIOS report weird SMBIOS version, fix that up */
+ switch (smbios_ver) {
+ case 0x021F:
+ case 0x0221:
+ pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
+ smbios_ver & 0xFF, 3);
+ smbios_ver = 0x0203;
+ break;
+ case 0x0233:
+ pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
+ smbios_ver = 0x0206;
+ break;
+ }
+ } else {
+ smbios_ver = 0;
+ }
+
+ buf += 16;
+
+ if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
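+		/*
+		 * Prefer the SMBIOS version; otherwise decode the BCD
+		 * revision byte at offset 14, e.g. 0x24 becomes 2.4.
+		 */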
+ if (smbios_ver)
+ dmi_ver = smbios_ver;
+ else
+ dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
+ dmi_ver <<= 8;
+ dmi_num = get_unaligned_le16(buf + 12);
+ dmi_len = get_unaligned_le16(buf + 6);
+ dmi_base = get_unaligned_le32(buf + 8);
+
+ if (dmi_walk_early(dmi_decode) == 0) {
+ if (smbios_ver) {
+ pr_info("SMBIOS %d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+ } else {
+ smbios_entry_point_size = 15;
+ memcpy(smbios_entry_point, buf,
+ smbios_entry_point_size);
+ pr_info("Legacy DMI %d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
+ }
+ dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+ pr_info("DMI: %s\n", dmi_ids_string);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
+ * 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
+ */
+static int __init dmi_smbios3_present(const u8 *buf)
+{
+ if (memcmp(buf, "_SM3_", 5) == 0 &&
+ buf[6] < 32 && dmi_checksum(buf, buf[6])) {
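+		/* Bytes 7-9 of the _SM3_ entry point give the version as major, minor, docrev */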
+ dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
+ dmi_num = 0; /* No longer specified */
+ dmi_len = get_unaligned_le32(buf + 12);
+ dmi_base = get_unaligned_le64(buf + 16);
+ smbios_entry_point_size = buf[6];
+ memcpy(smbios_entry_point, buf, smbios_entry_point_size);
+
+ if (dmi_walk_early(dmi_decode) == 0) {
+ pr_info("SMBIOS %d.%d.%d present.\n",
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
+ dmi_ver & 0xFF);
+ dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
+ pr_info("DMI: %s\n", dmi_ids_string);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void __init dmi_scan_machine(void)
+{
+ char __iomem *p, *q;
+ char buf[32];
+
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ /*
+ * According to the DMTF SMBIOS reference spec v3.0.0, it is
+ * allowed to define both the 64-bit entry point (smbios3) and
+ * the 32-bit entry point (smbios), in which case they should
+ * either both point to the same SMBIOS structure table, or the
+ * table pointed to by the 64-bit entry point should contain a
+ * superset of the table contents pointed to by the 32-bit entry
+ * point (section 5.2)
+ * This implies that the 64-bit entry point should have
+ * precedence if it is defined and supported by the OS. If we
+ * have the 64-bit entry point, but fail to decode it, fall
+ * back to the legacy one (if available)
+ */
+ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
+ p = dmi_early_remap(efi.smbios3, 32);
+ if (p == NULL)
+ goto error;
+ memcpy_fromio(buf, p, 32);
+ dmi_early_unmap(p, 32);
+
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ return;
+ }
+ }
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto error;
+
+ /* This is called as a core_initcall() because it isn't
+ * needed during early boot. This also means we can
+ * iounmap the space when we're done with it.
+ */
+ p = dmi_early_remap(efi.smbios, 32);
+ if (p == NULL)
+ goto error;
+ memcpy_fromio(buf, p, 32);
+ dmi_early_unmap(p, 32);
+
+ if (!dmi_present(buf)) {
+ dmi_available = 1;
+ return;
+ }
+ } else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
+ p = dmi_early_remap(SMBIOS_ENTRY_POINT_SCAN_START, 0x10000);
+ if (p == NULL)
+ goto error;
+
+ /*
+ * Same logic as above, look for a 64-bit entry point
+ * first, and if not found, fall back to 32-bit entry point.
+ */
+ memcpy_fromio(buf, p, 16);
+ for (q = p + 16; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_smbios3_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ return;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+
+ /*
+ * Iterate over all possible DMI header addresses q.
+ * Maintain the 32 bytes around q in buf. On the
+ * first iteration, substitute zero for the
+ * out-of-range bytes so there is no chance of falsely
+ * detecting an SMBIOS header.
+ */
+ memset(buf, 0, 16);
+ for (q = p; q < p + 0x10000; q += 16) {
+ memcpy_fromio(buf + 16, q, 16);
+ if (!dmi_present(buf)) {
+ dmi_available = 1;
+ dmi_early_unmap(p, 0x10000);
+ return;
+ }
+ memcpy(buf, buf + 16, 16);
+ }
+ dmi_early_unmap(p, 0x10000);
+ }
+ error:
+ pr_info("DMI not present or invalid.\n");
+}
+
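+/* sysfs clamps @pos and @count to the bin_attribute's registered size, so a plain copy is sufficient here. */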
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ memcpy(buf, attr->private + pos, count);
+ return count;
+}
+
+static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
+static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
+
+static int __init dmi_init(void)
+{
+ struct kobject *tables_kobj;
+ u8 *dmi_table;
+ int ret = -ENOMEM;
+
+ if (!dmi_available)
+ return 0;
+
+ /*
+ * Set up dmi directory at /sys/firmware/dmi. This entry should stay
+	 * even after a later error, as it can be used by other modules like
+ * dmi-sysfs.
+ */
+ dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
+ if (!dmi_kobj)
+ goto err;
+
+ tables_kobj = kobject_create_and_add("tables", dmi_kobj);
+ if (!tables_kobj)
+ goto err;
+
+ dmi_table = dmi_remap(dmi_base, dmi_len);
+ if (!dmi_table)
+ goto err_tables;
+
+ bin_attr_smbios_entry_point.size = smbios_entry_point_size;
+ bin_attr_smbios_entry_point.private = smbios_entry_point;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
+ if (ret)
+ goto err_unmap;
+
+ bin_attr_DMI.size = dmi_len;
+ bin_attr_DMI.private = dmi_table;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
+ if (!ret)
+ return 0;
+
+ sysfs_remove_bin_file(tables_kobj,
+ &bin_attr_smbios_entry_point);
+ err_unmap:
+ dmi_unmap(dmi_table);
+ err_tables:
+ kobject_del(tables_kobj);
+ kobject_put(tables_kobj);
+ err:
+ pr_err("dmi: Firmware registration failed.\n");
+
+ return ret;
+}
+subsys_initcall(dmi_init);
+
+/**
+ * dmi_setup - scan and setup DMI system information
+ *
+ * Scan the DMI system information. This sets up DMI identifiers
+ * (dmi_system_id) for printing on task dumps and prepares DIMM
+ * entry information (dmi_memdev_info) from the SMBIOS table for
+ * use when reporting memory errors.
+ */
+void __init dmi_setup(void)
+{
+ dmi_scan_machine();
+ if (!dmi_available)
+ return;
+
+ dmi_memdev_walk();
+ dump_stack_set_arch_desc("%s", dmi_ids_string);
+}
+
+/**
+ * dmi_matches - check if dmi_system_id structure matches system DMI data
+ * @dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_matches(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
+ int s = dmi->matches[i].slot;
+ if (s == DMI_NONE)
+ break;
+ if (s == DMI_OEM_STRING) {
+ /* DMI_OEM_STRING must be exact match */
+ const struct dmi_device *valid;
+
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+ dmi->matches[i].substr, NULL);
+ if (valid)
+ continue;
+ } else if (dmi_ident[s]) {
+ if (dmi->matches[i].exact_match) {
+ if (!strcmp(dmi_ident[s],
+ dmi->matches[i].substr))
+ continue;
+ } else {
+ if (strstr(dmi_ident[s],
+ dmi->matches[i].substr))
+ continue;
+ }
+ }
+
+ /* No match */
+ return false;
+ }
+ return true;
+}
+
+/**
+ * dmi_is_end_of_table - check for end-of-table marker
+ * @dmi: pointer to the dmi_system_id structure to check
+ */
+static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
+{
+ return dmi->matches[0].slot == DMI_NONE;
+}
+
+/**
+ * dmi_check_system - check system DMI data
+ * @list: array of dmi_system_id structures to match against
+ * All non-null elements of the list must match
+ * their slot's (field index's) data (i.e., each
+ * list string must be a substring of the specified
+ * DMI slot's string data) to be considered a
+ * successful match.
+ *
+ * Walk the blacklist table running matching functions until someone
+ * returns non-zero or we hit the end. The callback function is called
+ * for each successful match. Returns the number of matches.
+ *
+ * dmi_setup must be called before this function is called.
+ */
+int dmi_check_system(const struct dmi_system_id *list)
+{
+ int count = 0;
+ const struct dmi_system_id *d;
+
+ for (d = list; !dmi_is_end_of_table(d); d++)
+ if (dmi_matches(d)) {
+ count++;
+ if (d->callback && d->callback(d))
+ break;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(dmi_check_system);
+
+/**
+ * dmi_first_match - find dmi_system_id structure matching system DMI data
+ * @list: array of dmi_system_id structures to match against
+ * All non-null elements of the list must match
+ * their slot's (field index's) data (i.e., each
+ * list string must be a substring of the specified
+ * DMI slot's string data) to be considered a
+ * successful match.
+ *
+ * Walk the blacklist table until the first match is found. Return the
+ * pointer to the matching entry or NULL if there's no match.
+ *
+ * dmi_setup must be called before this function is called.
+ */
+const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
+{
+ const struct dmi_system_id *d;
+
+ for (d = list; !dmi_is_end_of_table(d); d++)
+ if (dmi_matches(d))
+ return d;
+
+ return NULL;
+}
+EXPORT_SYMBOL(dmi_first_match);
+
+/**
+ * dmi_get_system_info - return DMI data value
+ * @field: data index (see enum dmi_field)
+ *
+ * Returns one DMI data value, which can be used to perform
+ * complex DMI data checks.
+ */
+const char *dmi_get_system_info(int field)
+{
+ return dmi_ident[field];
+}
+EXPORT_SYMBOL(dmi_get_system_info);
+
+/**
+ * dmi_name_in_serial - Check if string is in the DMI product serial information
+ * @str: string to check for
+ */
+int dmi_name_in_serial(const char *str)
+{
+ int f = DMI_PRODUCT_SERIAL;
+ if (dmi_ident[f] && strstr(dmi_ident[f], str))
+ return 1;
+ return 0;
+}
+
+/**
+ * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
+ * @str: Case sensitive Name
+ */
+int dmi_name_in_vendors(const char *str)
+{
+ static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
+ int i;
+ for (i = 0; fields[i] != DMI_NONE; i++) {
+ int f = fields[i];
+ if (dmi_ident[f] && strstr(dmi_ident[f], str))
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dmi_name_in_vendors);
+
+/**
+ * dmi_find_device - find onboard device by type/name
+ * @type: device type or %DMI_DEV_TYPE_ANY to match all device types
+ * @name: device name string or %NULL to match all
+ * @from: previous device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known onboard devices. If a device is
+ * found with a matching @type and @name, a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * If @from is not %NULL, the search continues from the next device.
+ */
+const struct dmi_device *dmi_find_device(int type, const char *name,
+ const struct dmi_device *from)
+{
+ const struct list_head *head = from ? &from->list : &dmi_devices;
+ struct list_head *d;
+
+ for (d = head->next; d != &dmi_devices; d = d->next) {
+ const struct dmi_device *dev =
+ list_entry(d, struct dmi_device, list);
+
+ if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
+ ((name == NULL) || (strcmp(dev->name, name) == 0)))
+ return dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dmi_find_device);
+
+/**
+ * dmi_get_date - parse a DMI date
+ * @field: data index (see enum dmi_field)
+ * @yearp: optional out parameter for the year
+ * @monthp: optional out parameter for the month
+ * @dayp: optional out parameter for the day
+ *
+ * The date field is assumed to be in the form resembling
+ * [mm[/dd]]/yy[yy] and the result is stored in the out
+ * parameters, any or all of which can be omitted.
+ *
+ * If the field doesn't exist, all out parameters are set to zero
+ * and false is returned. Otherwise, true is returned with any
+ * invalid part of the date set to zero.
+ *
+ * On return, year, month and day are guaranteed to be in the
+ * range of [0,9999], [0,12] and [0,31] respectively.
+ */
+bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+ int year = 0, month = 0, day = 0;
+ bool exists;
+ const char *s, *y;
+ char *e;
+
+ s = dmi_get_system_info(field);
+ exists = s;
+ if (!exists)
+ goto out;
+
+ /*
+ * Determine year first. We assume the date string resembles
+ * mm/dd/yy[yy] but the original code extracted only the year
+ * from the end. Keep the behavior in the spirit of no
+ * surprises.
+ */
+ y = strrchr(s, '/');
+ if (!y)
+ goto out;
+
+ y++;
+ year = simple_strtoul(y, &e, 10);
+ if (y != e && year < 100) { /* 2-digit year */
+ year += 1900;
+ if (year < 1996) /* no dates < spec 1.0 */
+ year += 100;
+ }
+ if (year > 9999) /* year should fit in %04d */
+ year = 0;
+
+ /* parse the mm and dd */
+ month = simple_strtoul(s, &e, 10);
+ if (s == e || *e != '/' || !month || month > 12) {
+ month = 0;
+ goto out;
+ }
+
+ s = e + 1;
+ day = simple_strtoul(s, &e, 10);
+ if (s == y || s == e || *e != '/' || day > 31)
+ day = 0;
+out:
+ if (yearp)
+ *yearp = year;
+ if (monthp)
+ *monthp = month;
+ if (dayp)
+ *dayp = day;
+ return exists;
+}
+EXPORT_SYMBOL(dmi_get_date);
+
+/**
+ * dmi_get_bios_year - get a year out of DMI_BIOS_DATE field
+ *
+ * Returns year on success, -ENXIO if DMI is not selected,
+ * or a different negative error code if DMI field is not present
+ * or not parseable.
+ */
+int dmi_get_bios_year(void)
+{
+ bool exists;
+ int year;
+
+ exists = dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
+ if (!exists)
+ return -ENODATA;
+
+ return year ? year : -ERANGE;
+}
+EXPORT_SYMBOL(dmi_get_bios_year);
+
+/**
+ * dmi_walk - Walk the DMI table and get called back for every record
+ * @decode: Callback function
+ * @private_data: Private data to be passed to the callback function
+ *
+ * Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ * or a different negative error code if DMI walking fails.
+ */
+int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data)
+{
+ u8 *buf;
+
+ if (!dmi_available)
+ return -ENXIO;
+
+ buf = dmi_remap(dmi_base, dmi_len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ dmi_decode_table(buf, decode, private_data);
+
+ dmi_unmap(buf);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dmi_walk);
+
+/**
+ * dmi_match - compare a string to the dmi field (if exists)
+ * @f: DMI field identifier
+ * @str: string to compare the DMI field to
+ *
+ * Returns true if the requested field equals @str (including when both are NULL).
+ */
+bool dmi_match(enum dmi_field f, const char *str)
+{
+ const char *info = dmi_get_system_info(f);
+
+ if (info == NULL || str == NULL)
+ return info == str;
+
+ return !strcmp(info, str);
+}
+EXPORT_SYMBOL_GPL(dmi_match);
+
+void dmi_memdev_name(u16 handle, const char **bank, const char **device)
+{
+ int n;
+
+ if (dmi_memdev == NULL)
+ return;
+
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle) {
+ *bank = dmi_memdev[n].bank;
+ *device = dmi_memdev[n].device;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_name);
+
+u64 dmi_memdev_size(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].size;
+ }
+ }
+ return ~0ull;
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_size);
+
+/**
+ * dmi_memdev_type - get the memory type
+ * @handle: DMI structure handle
+ *
+ * Return the DMI memory type of the module in the slot associated with the
+ * given DMI handle, or 0x0 if no such DMI handle exists.
+ */
+u8 dmi_memdev_type(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].type;
+ }
+ }
+ return 0x0; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_type);
+
+/**
+ * dmi_memdev_handle - get the DMI handle of a memory slot
+ * @slot: slot number
+ *
+ * Return the DMI handle associated with a given memory slot, or %0xFFFF
+ * if there is no such slot.
+ */
+u16 dmi_memdev_handle(int slot)
+{
+ if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
+ return dmi_memdev[slot].handle;
+
+ return 0xffff; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_handle);
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
new file mode 100644
index 000000000..14d0970a7
--- /dev/null
+++ b/drivers/firmware/edd.c
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/firmware/edd.c
+ * Copyright (C) 2002, 2003, 2004 Dell Inc.
+ * by Matt Domsch <Matt_Domsch@dell.com>
+ * disk signature by Matt Domsch, Andrew Wilks, and Sandeep K. Shandilya
+ * legacy CHS by Patrick J. LoPresti <patl@users.sourceforge.net>
+ *
+ * BIOS Enhanced Disk Drive Services (EDD)
+ * conformant to T13 Committee www.t13.org
+ * projects 1572D, 1484D, 1386D, 1226DT
+ *
+ * This code takes information provided by BIOS EDD calls
+ * fn41 - Check Extensions Present and
+ * fn48 - Get Device Parameters with EDD extensions
+ * made in setup.S, copied to safe structures in setup.c,
+ * and presents it in sysfs.
+ *
+ * Please see http://linux.dell.com/edd/results.html for
+ * the list of BIOSes which have been reported to implement EDD.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/edd.h>
+
+#define EDD_VERSION "0.16"
+#define EDD_DATE "2004-Jun-25"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to BIOS EDD information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EDD_VERSION);
+
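+/* Space left in the PAGE_SIZE sysfs buffer; relies on the local 'p' and 'buf' variables of each show routine. */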
+#define left (PAGE_SIZE - (p - buf) - 1)
+
+struct edd_device {
+ unsigned int index;
+ unsigned int mbr_signature;
+ struct edd_info *info;
+ struct kobject kobj;
+};
+
+struct edd_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edd_device * edev, char *buf);
+ int (*test) (struct edd_device * edev);
+};
+
+/* forward declarations */
+static int edd_dev_is_type(struct edd_device *edev, const char *type);
+static struct pci_dev *edd_get_pci_dev(struct edd_device *edev);
+
+static struct edd_device *edd_devices[EDD_MBR_SIG_MAX];
+
+#define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \
+struct edd_attribute edd_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .test = _test, \
+};
+
+static int
+edd_has_mbr_signature(struct edd_device *edev)
+{
+ return edev->index < min_t(unsigned char, edd.mbr_signature_nr, EDD_MBR_SIG_MAX);
+}
+
+static int
+edd_has_edd_info(struct edd_device *edev)
+{
+ return edev->index < min_t(unsigned char, edd.edd_info_nr, EDDMAXNR);
+}
+
+static inline struct edd_info *
+edd_dev_get_info(struct edd_device *edev)
+{
+ return edev->info;
+}
+
+static inline void
+edd_dev_set_info(struct edd_device *edev, int i)
+{
+ edev->index = i;
+ if (edd_has_mbr_signature(edev))
+ edev->mbr_signature = edd.mbr_signature[i];
+ if (edd_has_edd_info(edev))
+ edev->info = &edd.edd_info[i];
+}
+
+#define to_edd_attr(_attr) container_of(_attr,struct edd_attribute,attr)
+#define to_edd_device(obj) container_of(obj,struct edd_device,kobj)
+
+static ssize_t
+edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
+{
+ struct edd_device *dev = to_edd_device(kobj);
+ struct edd_attribute *edd_attr = to_edd_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (edd_attr->show)
+ ret = edd_attr->show(dev, buf);
+ return ret;
+}
+
+static const struct sysfs_ops edd_attr_ops = {
+ .show = edd_attr_show,
+};
+
+static ssize_t
+edd_show_host_bus(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ int i;
+
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ for (i = 0; i < 4; i++) {
+ if (isprint(info->params.host_bus_type[i])) {
+ p += scnprintf(p, left, "%c", info->params.host_bus_type[i]);
+ } else {
+ p += scnprintf(p, left, " ");
+ }
+ }
+
+ if (!strncmp(info->params.host_bus_type, "ISA", 3)) {
+ p += scnprintf(p, left, "\tbase_address: %x\n",
+ info->params.interface_path.isa.base_address);
+ } else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
+ !strncmp(info->params.host_bus_type, "PCI", 3) ||
+ !strncmp(info->params.host_bus_type, "XPRS", 4)) {
+ p += scnprintf(p, left,
+ "\t%02x:%02x.%d channel: %u\n",
+ info->params.interface_path.pci.bus,
+ info->params.interface_path.pci.slot,
+ info->params.interface_path.pci.function,
+ info->params.interface_path.pci.channel);
+ } else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
+ !strncmp(info->params.host_bus_type, "HTPT", 4)) {
+ p += scnprintf(p, left,
+ "\tTBD: %llx\n",
+ info->params.interface_path.ibnd.reserved);
+
+ } else {
+ p += scnprintf(p, left, "\tunknown: %llx\n",
+ info->params.interface_path.unknown.reserved);
+ }
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_interface(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ int i;
+
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ for (i = 0; i < 8; i++) {
+ if (isprint(info->params.interface_type[i])) {
+ p += scnprintf(p, left, "%c", info->params.interface_type[i]);
+ } else {
+ p += scnprintf(p, left, " ");
+ }
+ }
+ if (!strncmp(info->params.interface_type, "ATAPI", 5)) {
+ p += scnprintf(p, left, "\tdevice: %u lun: %u\n",
+ info->params.device_path.atapi.device,
+ info->params.device_path.atapi.lun);
+ } else if (!strncmp(info->params.interface_type, "ATA", 3)) {
+ p += scnprintf(p, left, "\tdevice: %u\n",
+ info->params.device_path.ata.device);
+ } else if (!strncmp(info->params.interface_type, "SCSI", 4)) {
+ p += scnprintf(p, left, "\tid: %u lun: %llu\n",
+ info->params.device_path.scsi.id,
+ info->params.device_path.scsi.lun);
+ } else if (!strncmp(info->params.interface_type, "USB", 3)) {
+ p += scnprintf(p, left, "\tserial_number: %llx\n",
+ info->params.device_path.usb.serial_number);
+ } else if (!strncmp(info->params.interface_type, "1394", 4)) {
+ p += scnprintf(p, left, "\teui: %llx\n",
+ info->params.device_path.i1394.eui);
+ } else if (!strncmp(info->params.interface_type, "FIBRE", 5)) {
+ p += scnprintf(p, left, "\twwid: %llx lun: %llx\n",
+ info->params.device_path.fibre.wwid,
+ info->params.device_path.fibre.lun);
+ } else if (!strncmp(info->params.interface_type, "I2O", 3)) {
+ p += scnprintf(p, left, "\tidentity_tag: %llx\n",
+ info->params.device_path.i2o.identity_tag);
+ } else if (!strncmp(info->params.interface_type, "RAID", 4)) {
+ p += scnprintf(p, left, "\tidentity_tag: %x\n",
+ info->params.device_path.raid.array_number);
+ } else if (!strncmp(info->params.interface_type, "SATA", 4)) {
+ p += scnprintf(p, left, "\tdevice: %u\n",
+ info->params.device_path.sata.device);
+ } else {
+ p += scnprintf(p, left, "\tunknown: %llx %llx\n",
+ info->params.device_path.unknown.reserved1,
+ info->params.device_path.unknown.reserved2);
+ }
+
+ return (p - buf);
+}
+
+/**
+ * edd_show_raw_data() - copies raw data to buffer for userspace to parse
+ * @edev: target edd_device
+ * @buf: output buffer
+ *
+ * Returns: number of bytes written, or -EINVAL on failure
+ */
+static ssize_t
+edd_show_raw_data(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ ssize_t len = sizeof (info->params);
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE))
+ len = info->params.length;
+
+	/* In case of buggy BIOSes */
+ if (len > (sizeof(info->params)))
+ len = sizeof(info->params);
+
+ memcpy(buf, &info->params, len);
+ return len;
+}
+
+static ssize_t
+edd_show_version(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "0x%02x\n", info->version);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_mbr_signature(struct edd_device *edev, char *buf)
+{
+ char *p = buf;
+ p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_extensions(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) {
+ p += scnprintf(p, left, "Fixed disk access\n");
+ }
+ if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) {
+ p += scnprintf(p, left, "Device locking and ejecting\n");
+ }
+ if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) {
+ p += scnprintf(p, left, "Enhanced Disk Drive support\n");
+ }
+ if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) {
+ p += scnprintf(p, left, "64-bit extensions\n");
+ }
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_info_flags(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ if (info->params.info_flags & EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT)
+ p += scnprintf(p, left, "DMA boundary error transparent\n");
+ if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID)
+ p += scnprintf(p, left, "geometry valid\n");
+ if (info->params.info_flags & EDD_INFO_REMOVABLE)
+ p += scnprintf(p, left, "removable\n");
+ if (info->params.info_flags & EDD_INFO_WRITE_VERIFY)
+ p += scnprintf(p, left, "write verify\n");
+ if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION)
+ p += scnprintf(p, left, "media change notification\n");
+ if (info->params.info_flags & EDD_INFO_LOCKABLE)
+ p += scnprintf(p, left, "lockable\n");
+ if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT)
+ p += scnprintf(p, left, "no media present\n");
+ if (info->params.info_flags & EDD_INFO_USE_INT13_FN50)
+ p += scnprintf(p, left, "use int13 fn50\n");
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_max_cylinder);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_max_head(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_max_head);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->legacy_sectors_per_track);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_cylinders(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_heads(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.num_default_heads);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_default_sectors_per_track(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%u\n", info->params.sectors_per_track);
+ return (p - buf);
+}
+
+static ssize_t
+edd_show_sectors(struct edd_device *edev, char *buf)
+{
+ struct edd_info *info;
+ char *p = buf;
+ if (!edev)
+ return -EINVAL;
+ info = edd_dev_get_info(edev);
+ if (!info || !buf)
+ return -EINVAL;
+
+ p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors);
+ return (p - buf);
+}
+
+
+/*
+ * Some device instances may not have all the above attributes,
+ * or the attribute values may be meaningless (e.g. if
+ * the device is < EDD 3.0, it won't have host_bus and interface
+ * information), so don't bother making files for them. Likewise,
+ * if the default_{cylinders,heads,sectors_per_track} values
+ * are zero, the BIOS doesn't provide sane values, so don't bother
+ * creating files for them either.
+ */
+
+static int
+edd_has_legacy_max_cylinder(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_max_cylinder > 0;
+}
+
+static int
+edd_has_legacy_max_head(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_max_head > 0;
+}
+
+static int
+edd_has_legacy_sectors_per_track(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->legacy_sectors_per_track > 0;
+}
+
+static int
+edd_has_default_cylinders(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.num_default_cylinders > 0;
+}
+
+static int
+edd_has_default_heads(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.num_default_heads > 0;
+}
+
+static int
+edd_has_default_sectors_per_track(struct edd_device *edev)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+ return info->params.sectors_per_track > 0;
+}
+
+static int
+edd_has_edd30(struct edd_device *edev)
+{
+ struct edd_info *info;
+ int i;
+ u8 csum = 0;
+
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+ if (!info)
+ return 0;
+
+ if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
+ return 0;
+ }
+
+
+ /* We support only T13 spec */
+ if (info->params.device_path_info_length != 44)
+ return 0;
+
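+	/* Device path info starts at offset 30 of the params block and must sum to zero (mod 256) */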
+ for (i = 30; i < info->params.device_path_info_length + 30; i++)
+ csum += *(((u8 *)&info->params) + i);
+
+ if (csum)
+ return 0;
+
+ return 1;
+}
+
+
+static EDD_DEVICE_ATTR(raw_data, 0444, edd_show_raw_data, edd_has_edd_info);
+static EDD_DEVICE_ATTR(version, 0444, edd_show_version, edd_has_edd_info);
+static EDD_DEVICE_ATTR(extensions, 0444, edd_show_extensions, edd_has_edd_info);
+static EDD_DEVICE_ATTR(info_flags, 0444, edd_show_info_flags, edd_has_edd_info);
+static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info);
+static EDD_DEVICE_ATTR(legacy_max_cylinder, 0444,
+ edd_show_legacy_max_cylinder,
+ edd_has_legacy_max_cylinder);
+static EDD_DEVICE_ATTR(legacy_max_head, 0444, edd_show_legacy_max_head,
+ edd_has_legacy_max_head);
+static EDD_DEVICE_ATTR(legacy_sectors_per_track, 0444,
+ edd_show_legacy_sectors_per_track,
+ edd_has_legacy_sectors_per_track);
+static EDD_DEVICE_ATTR(default_cylinders, 0444, edd_show_default_cylinders,
+ edd_has_default_cylinders);
+static EDD_DEVICE_ATTR(default_heads, 0444, edd_show_default_heads,
+ edd_has_default_heads);
+static EDD_DEVICE_ATTR(default_sectors_per_track, 0444,
+ edd_show_default_sectors_per_track,
+ edd_has_default_sectors_per_track);
+static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
+static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
+static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
+
+
+/* These are default attributes that are added for every edd
+ * device discovered. There are none.
+ */
+static struct attribute * def_attrs[] = {
+ NULL,
+};
+
+/* These attributes are conditional and only added for some devices. */
+static struct edd_attribute * edd_attrs[] = {
+ &edd_attr_raw_data,
+ &edd_attr_version,
+ &edd_attr_extensions,
+ &edd_attr_info_flags,
+ &edd_attr_sectors,
+ &edd_attr_legacy_max_cylinder,
+ &edd_attr_legacy_max_head,
+ &edd_attr_legacy_sectors_per_track,
+ &edd_attr_default_cylinders,
+ &edd_attr_default_heads,
+ &edd_attr_default_sectors_per_track,
+ &edd_attr_interface,
+ &edd_attr_host_bus,
+ &edd_attr_mbr_signature,
+ NULL,
+};
+
+/**
+ * edd_release - free edd structure
+ * @kobj: kobject of edd structure
+ *
+ * This is called when the refcount of the edd structure
+ * reaches 0. This should happen right after we unregister,
+ * but just in case, we use the release callback anyway.
+ */
+
+static void edd_release(struct kobject * kobj)
+{
+ struct edd_device * dev = to_edd_device(kobj);
+ kfree(dev);
+}
+
+static struct kobj_type edd_ktype = {
+ .release = edd_release,
+ .sysfs_ops = &edd_attr_ops,
+ .default_attrs = def_attrs,
+};
+
+static struct kset *edd_kset;
+
+
+/**
+ * edd_dev_is_type() - is this EDD device a 'type' device?
+ * @edev: target edd_device
+ * @type: a host bus or interface identifier string per the EDD spec
+ *
+ * Returns 1 (TRUE) if it is a 'type' device, 0 otherwise.
+ */
+static int
+edd_dev_is_type(struct edd_device *edev, const char *type)
+{
+ struct edd_info *info;
+ if (!edev)
+ return 0;
+ info = edd_dev_get_info(edev);
+
+ if (type && info) {
+ if (!strncmp(info->params.host_bus_type, type, strlen(type)) ||
+ !strncmp(info->params.interface_type, type, strlen(type)))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * edd_get_pci_dev() - finds pci_dev that matches edev
+ * @edev: edd_device
+ *
+ * Returns pci_dev if found, or NULL
+ */
+static struct pci_dev *
+edd_get_pci_dev(struct edd_device *edev)
+{
+ struct edd_info *info = edd_dev_get_info(edev);
+
+ if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
+ return pci_get_domain_bus_and_slot(0,
+ info->params.interface_path.pci.bus,
+ PCI_DEVFN(info->params.interface_path.pci.slot,
+ info->params.interface_path.pci.function));
+ }
+ return NULL;
+}
+
+static int
+edd_create_symlink_to_pcidev(struct edd_device *edev)
+{
+
+ struct pci_dev *pci_dev = edd_get_pci_dev(edev);
+ int ret;
+ if (!pci_dev)
+ return 1;
+ ret = sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev");
+ pci_dev_put(pci_dev);
+ return ret;
+}
+
+static inline void
+edd_device_unregister(struct edd_device *edev)
+{
+ kobject_put(&edev->kobj);
+}
+
+static void edd_populate_dir(struct edd_device * edev)
+{
+ struct edd_attribute * attr;
+ int error = 0;
+ int i;
+
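+	/* Create a sysfs file for each attribute whose test callback (if any) reports it valid for this device */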
+ for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
+ if (!attr->test ||
+ (attr->test && attr->test(edev)))
+ error = sysfs_create_file(&edev->kobj,&attr->attr);
+ }
+
+ if (!error) {
+ edd_create_symlink_to_pcidev(edev);
+ }
+}
+
+static int
+edd_device_register(struct edd_device *edev, int i)
+{
+ int error;
+
+ if (!edev)
+ return 1;
+ edd_dev_set_info(edev, i);
+ edev->kobj.kset = edd_kset;
+ error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
+ "int13_dev%02x", 0x80 + i);
+ if (!error) {
+ edd_populate_dir(edev);
+ kobject_uevent(&edev->kobj, KOBJ_ADD);
+ }
+ return error;
+}
+
+static inline int edd_num_devices(void)
+{
+ return max_t(unsigned char,
+ min_t(unsigned char, EDD_MBR_SIG_MAX, edd.mbr_signature_nr),
+ min_t(unsigned char, EDDMAXNR, edd.edd_info_nr));
+}
+
+/**
+ * edd_init() - creates sysfs tree of EDD data
+ */
+static int __init
+edd_init(void)
+{
+ int i;
+ int rc=0;
+ struct edd_device *edev;
+
+ if (!edd_num_devices())
+ return -ENODEV;
+
+ printk(KERN_INFO "BIOS EDD facility v%s %s, %d devices found\n",
+ EDD_VERSION, EDD_DATE, edd_num_devices());
+
+ edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
+ if (!edd_kset)
+ return -ENOMEM;
+
+ for (i = 0; i < edd_num_devices(); i++) {
+ edev = kzalloc(sizeof (*edev), GFP_KERNEL);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = edd_device_register(edev, i);
+ if (rc) {
+ kfree(edev);
+ goto out;
+ }
+ edd_devices[i] = edev;
+ }
+
+ return 0;
+
+out:
+ while (--i >= 0)
+ edd_device_unregister(edd_devices[i]);
+ kset_unregister(edd_kset);
+ return rc;
+}
+
+static void __exit
+edd_exit(void)
+{
+ int i;
+ struct edd_device *edev;
+
+ for (i = 0; i < edd_num_devices(); i++) {
+ if ((edev = edd_devices[i]))
+ edd_device_unregister(edev);
+ }
+ kset_unregister(edd_kset);
+}
+
+late_initcall(edd_init);
+module_exit(edd_exit);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
new file mode 100644
index 000000000..d9895491f
--- /dev/null
+++ b/drivers/firmware/efi/Kconfig
@@ -0,0 +1,286 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "EFI (Extensible Firmware Interface) Support"
+ depends on EFI
+
+config EFI_VARS
+ tristate "EFI Variable Support via sysfs"
+ depends on EFI && (X86 || IA64)
+ default n
+ help
+ If you say Y here, you are able to get EFI (Extensible Firmware
+ Interface) variable information via sysfs. You may read,
+ write, create, and destroy EFI variables through this interface.
+ Note that this driver is only retained for compatibility with
+ legacy users: new users should use the efivarfs filesystem
+ instead.
+
+config EFI_ESRT
+ bool
+ depends on EFI && !IA64
+ default y
+
+config EFI_VARS_PSTORE
+ tristate "Register efivars backend for pstore"
+ depends on PSTORE
+ default y
+ help
+	  Say Y here to enable use of efivars as a backend for pstore. This
+ will allow writing console messages, crash dumps, or anything
+ else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+ bool "Disable using efivars as a pstore backend by default"
+ depends on EFI_VARS_PSTORE
+ default n
+ help
+ Saying Y here will disable the use of efivars as a storage
+ backend for pstore by default. This setting can be overridden
+ using the efivars module's pstore_disable parameter.
+
+config EFI_RUNTIME_MAP
+ bool "Export efi runtime maps to sysfs"
+ depends on X86 && EFI && KEXEC_CORE
+ default y
+ help
+	  Export EFI runtime memory maps to /sys/firmware/efi/runtime-map.
+	  That memory map is used, for example, by kexec to set up the EFI
+	  virtual mapping for the 2nd kernel, but can also be used for
+	  debugging purposes.
+
+ See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
+
+config EFI_FAKE_MEMMAP
+ bool "Enable EFI fake memory map"
+ depends on EFI && X86
+ default n
+ help
+	  Saying Y here will enable the "efi_fake_mem" boot option.
+	  By specifying this parameter, you can add arbitrary attributes
+	  to specific memory ranges by updating the original (firmware
+	  provided) EFI memmap.
+	  This is useful for debugging EFI memmap related features,
+	  e.g. the Address Range Mirroring feature.
+
+config EFI_MAX_FAKE_MEM
+ int "maximum allowable number of ranges in efi_fake_mem boot option"
+ depends on EFI_FAKE_MEMMAP
+ range 1 128
+ default 8
+ help
+	  Maximum allowable number of ranges in the efi_fake_mem boot option.
+	  Up to this many ranges can be specified as a comma-separated list.
+ The default value is 8.
+
+config EFI_SOFT_RESERVE
+ bool "Reserve EFI Specific Purpose Memory"
+ depends on EFI && EFI_STUB && ACPI_HMAT
+ default ACPI_HMAT
+ help
+	  On systems that have mixed performance classes of memory, EFI
+	  may indicate specific purpose memory with an attribute (see
+	  EFI_MEMORY_SP in UEFI 2.8). A memory range tagged with this
+	  attribute may have unique performance characteristics compared
+	  to the system's general purpose "System RAM" pool. On the
+	  expectation that such memory has application specific usage,
+	  and that its base EFI memory type is "conventional", answer Y to
+	  arrange for the kernel to reserve it as a "Soft Reserved"
+	  resource and set it aside for direct-access (device-dax) by
+	  default. The memory range can later be optionally assigned to
+ the page allocator by system administrator policy via the
+ device-dax kmem facility. Say N to have the kernel treat this
+ memory as "System RAM" by default.
+
+ If unsure, say Y.
+
+config EFI_PARAMS_FROM_FDT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the EFI runtime support gets system table address, memory
+ map address, and other parameters from the device tree.
+
+config EFI_RUNTIME_WRAPPERS
+ bool
+
+config EFI_GENERIC_STUB
+ bool
+
+config EFI_ARMSTUB_DTB_LOADER
+ bool "Enable the DTB loader"
+ depends on EFI_GENERIC_STUB && !RISCV
+ default y
+ help
+ Select this config option to add support for the dtb= command
+ line parameter, allowing a device tree blob to be loaded into
+ memory from the EFI System Partition by the stub.
+
+	  If the device tree is provided by the platform or by
+	  the bootloader, this option may not be needed.
+	  But for various development reasons, and to maintain existing
+	  functionality for bootloaders that do not have such support,
+	  this option is necessary.
+
+config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
+ bool "Enable the command line initrd loader" if !X86
+ depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
+ default y
+ depends on !RISCV
+ help
+ Select this config option to add support for the initrd= command
+ line parameter, allowing an initrd that resides on the same volume
+ as the kernel image to be loaded into memory.
+
+ This method is deprecated.
+
+config EFI_BOOTLOADER_CONTROL
+ tristate "EFI Bootloader Control"
+ default n
+ help
+ This module installs a reboot hook, such that if reboot() is
+ invoked with a string argument NNN, "NNN" is copied to the
+ "LoaderEntryOneShot" EFI variable, to be read by the
+ bootloader. If the string matches one of the boot labels
+ defined in its configuration, the bootloader will boot once
+ to that label. The "LoaderEntryRebootReason" EFI variable is
+ set with the reboot reason: "reboot" or "shutdown". The
+ bootloader reads this reboot reason and takes particular
+ action according to its policy.
+
+config EFI_CAPSULE_LOADER
+ tristate "EFI capsule loader"
+ depends on EFI
+ help
+ This option exposes a loader interface "/dev/efi_capsule_loader" for
+ users to load EFI capsules. This driver requires working runtime
+ capsule support in the firmware, which many OEMs do not provide.
+
+ Most users should say N.
+
+config EFI_CAPSULE_QUIRK_QUARK_CSH
+ bool "Add support for Quark capsules with non-standard headers"
+ depends on X86 && !64BIT
+ select EFI_CAPSULE_LOADER
+ default y
+ help
+ Add support for processing Quark X1000 EFI capsules, whose header
+ layout deviates from the layout mandated by the UEFI specification.
+
+config EFI_TEST
+ tristate "EFI Runtime Service Tests Support"
+ depends on EFI
+ default n
+ help
+	  This driver uses the efi.<service> function pointers directly instead
+	  of going through the efivar API, because it is not trying to test the
+	  kernel subsystem, but rather the UEFI runtime service interfaces
+	  provided by the firmware. This driver is used by the Firmware
+	  Test Suite (FWTS) to test the readiness of the firmware's UEFI
+	  runtime interfaces.
+ Details for FWTS are available from:
+ <https://wiki.ubuntu.com/FirmwareTestSuite>
+
+ Say Y here to enable the runtime services support via /dev/efi_test.
+ If unsure, say N.
+
+config APPLE_PROPERTIES
+ bool "Apple Device Properties"
+ depends on EFI_STUB && X86
+ select EFI_DEV_PATH_PARSER
+ select UCS2_STRING
+ help
+ Retrieve properties from EFI on Apple Macs and assign them to
+ devices, allowing for improved support of Apple hardware.
+ Properties that would otherwise be missing include the
+ Thunderbolt Device ROM and GPU configuration data.
+
+ If unsure, say Y if you have a Mac. Otherwise N.
+
+config RESET_ATTACK_MITIGATION
+ bool "Reset memory attack mitigation"
+ depends on EFI_STUB
+ help
+ Request that the firmware clear the contents of RAM after a reboot
+ using the TCG Platform Reset Attack Mitigation specification. This
+ protects against an attacker forcibly rebooting the system while it
+ still contains secrets in RAM, booting another OS and extracting the
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
+
+config EFI_RCI2_TABLE
+ bool "EFI Runtime Configuration Interface Table Version 2 Support"
+ depends on X86 || COMPILE_TEST
+ help
+ Displays the content of the Runtime Configuration Interface
+ Table version 2 on Dell EMC PowerEdge systems as a binary
+	  attribute 'rci2' under the /sys/firmware/efi/tables directory.
+
+	  The RCI2 table contains BIOS HII in XML format and is used to populate
+ BIOS setup page in Dell EMC OpenManage Server Administrator tool.
+ The BIOS setup page contains BIOS tokens which can be configured.
+
+ Say Y here for Dell EMC PowerEdge systems.
+
+config EFI_DISABLE_PCI_DMA
+ bool "Clear Busmaster bit on PCI bridges during ExitBootServices()"
+ help
+ Disable the busmaster bit in the control register on all PCI bridges
+ while calling ExitBootServices() and passing control to the runtime
+ kernel. System firmware may configure the IOMMU to prevent malicious
+ PCI devices from being able to attack the OS via DMA. However, since
+ firmware can't guarantee that the OS is IOMMU-aware, it will tear
+ down IOMMU configuration when ExitBootServices() is called. This
+	  leaves a window during which a hostile device could still cause
+ damage before Linux configures the IOMMU again.
+
+ If you say Y here, the EFI stub will clear the busmaster bit on all
+ PCI bridges before ExitBootServices() is called. This will prevent
+ any malicious PCI devices from being able to perform DMA until the
+ kernel reenables busmastering after configuring the IOMMU.
+
+ This option will cause failures with some poorly behaved hardware
+ and should not be enabled without testing. The kernel commandline
+ options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma"
+ may be used to override this option.
+
+endmenu
+
+config EFI_EMBEDDED_FIRMWARE
+ bool
+ depends on EFI
+ select CRYPTO_LIB_SHA256
+
+config UEFI_CPER
+ bool
+
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && ( ARM || ARM64 )
+ default y
+
+config UEFI_CPER_X86
+ bool
+ depends on UEFI_CPER && X86
+ default y
+
+config EFI_DEV_PATH_PARSER
+ bool
+ depends on ACPI
+ default n
+
+config EFI_EARLYCON
+ def_bool y
+ depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+ select FONT_SUPPORT
+ select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+ bool "Load custom ACPI SSDT overlay from an EFI variable"
+ depends on EFI && ACPI
+ default ACPI_TABLE_UPGRADE
+ help
+ Allow loading of an ACPI SSDT overlay from an EFI variable specified
+ by a kernel command line option.
+
+ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+ information.
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
new file mode 100644
index 000000000..d6ca2da19
--- /dev/null
+++ b/drivers/firmware/efi/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux kernel
+#
+
+#
+# ARM64 maps efi runtime services in userspace addresses
+# which don't have KASAN shadow. So dereference of these addresses
+# in efi_call_virt() will cause crash if this code instrumented.
+#
+KASAN_SANITIZE_runtime-wrappers.o := n
+
+obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
+obj-$(CONFIG_EFI) += capsule.o memmap.o
+obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
+obj-$(CONFIG_EFI_VARS) += efivars.o
+obj-$(CONFIG_EFI_ESRT) += esrt.o
+obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
+obj-$(CONFIG_UEFI_CPER) += cper.o
+obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
+obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
+subdir-$(CONFIG_EFI_STUB) += libstub
+obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
+obj-$(CONFIG_EFI_TEST) += test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
+obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
+obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+
+fake_map-y += fake_mem.o
+fake_map-$(CONFIG_X86) += x86_fake_mem.o
+
+arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o
+obj-$(CONFIG_ARM) += $(arm-obj-y)
+obj-$(CONFIG_ARM64) += $(arm-obj-y)
+riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o
+obj-$(CONFIG_RISCV) += $(riscv-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
+obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 000000000..e51838d74
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/memblock.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+ dump_properties = true;
+ return 1;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+ u32 len;
+ u32 prop_count;
+ struct efi_dev_path path[];
+ /*
+ * followed by key/value pairs, each key and value preceded by u32 len,
+ * len includes itself, value may be empty (in which case its len is 4)
+ */
+};
+
+struct properties_header {
+ u32 len;
+ u32 version;
+ u32 dev_count;
+ struct dev_header dev_header[];
+};
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+ struct device *dev, const void *ptr,
+ struct property_entry entry[])
+{
+ int i;
+
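+	/* Each property is a length-prefixed UCS-2 key followed by a length-prefixed value; both lengths include themselves */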
+ for (i = 0; i < dev_header->prop_count; i++) {
+ int remaining = dev_header->len - (ptr - (void *)dev_header);
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
+ char *key;
+
+ if (sizeof(key_len) > remaining)
+ break;
+
+ key_len = *(typeof(key_len) *)ptr;
+ if (key_len + sizeof(val_len) > remaining ||
+ key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+ *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+ dev_err(dev, "invalid property name len at %#zx\n",
+ ptr - (void *)dev_header);
+ break;
+ }
+
+ val_len = *(typeof(val_len) *)(ptr + key_len);
+ if (key_len + val_len > remaining ||
+ val_len < sizeof(val_len)) {
+ dev_err(dev, "invalid property val len at %#zx\n",
+ ptr - (void *)dev_header + key_len);
+ break;
+ }
+
+ /* 4 bytes to accommodate UTF-8 code points + null byte */
+ key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+ if (!key) {
+ dev_err(dev, "cannot allocate property name\n");
+ break;
+ }
+ ucs2_as_utf8(key, ptr + sizeof(key_len),
+ key_len - sizeof(key_len));
+
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ if (entry_len)
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
+ else
+ entry[i] = PROPERTY_ENTRY_BOOL(key);
+
+ if (dump_properties) {
+ dev_info(dev, "property: %s\n", key);
+ print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, entry_data, entry_len, true);
+ }
+
+ ptr += key_len + val_len;
+ }
+
+ if (i != dev_header->prop_count) {
+ dev_err(dev, "got %d device properties, expected %u\n", i,
+ dev_header->prop_count);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ return;
+ }
+
+ dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+ size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+ while (offset + sizeof(struct dev_header) < properties->len) {
+ struct dev_header *dev_header = (void *)properties + offset;
+ struct property_entry *entry = NULL;
+ const struct efi_dev_path *ptr;
+ struct device *dev;
+ size_t len;
+ int ret, i;
+
+ if (offset + dev_header->len > properties->len ||
+ dev_header->len <= sizeof(*dev_header)) {
+ pr_err("invalid len in dev_header at %#zx\n", offset);
+ return -EINVAL;
+ }
+
+ ptr = dev_header->path;
+ len = dev_header->len - sizeof(*dev_header);
+
+ dev = efi_get_device_by_path(&ptr, &len);
+ if (IS_ERR(dev)) {
+ pr_err("device path parse error %ld at %#zx:\n",
+ PTR_ERR(dev), (void *)ptr - (void *)dev_header);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ dev = NULL;
+ goto skip_device;
+ }
+
+ entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry) {
+ dev_err(dev, "cannot allocate properties\n");
+ goto skip_device;
+ }
+
+ unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+ if (!entry[0].name)
+ goto skip_device;
+
+ ret = device_add_properties(dev, entry); /* makes deep copy */
+ if (ret)
+ dev_err(dev, "error %d assigning properties\n", ret);
+
+ for (i = 0; entry[i].name; i++)
+ kfree(entry[i].name);
+
+skip_device:
+ kfree(entry);
+ put_device(dev);
+ offset += dev_header->len;
+ }
+
+ return 0;
+}
+
+static int __init map_properties(void)
+{
+ struct properties_header *properties;
+ struct setup_data *data;
+ u32 data_len;
+ u64 pa_data;
+ int ret;
+
+ if (!x86_apple_machine)
+ return 0;
+
+ pa_data = boot_params.hdr.setup_data;
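+	/* Walk the boot_params setup_data chain looking for a SETUP_APPLE_PROPERTIES entry */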
+ while (pa_data) {
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+ if (!data) {
+ pr_err("cannot map setup_data header\n");
+ return -ENOMEM;
+ }
+
+ if (data->type != SETUP_APPLE_PROPERTIES) {
+ pa_data = data->next;
+ memunmap(data);
+ continue;
+ }
+
+ data_len = data->len;
+ memunmap(data);
+
+ data = memremap(pa_data, sizeof(*data) + data_len, MEMREMAP_WB);
+ if (!data) {
+ pr_err("cannot map setup_data payload\n");
+ return -ENOMEM;
+ }
+
+ properties = (struct properties_header *)data->data;
+ if (properties->version != 1) {
+ pr_err("unsupported version:\n");
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -ENOTSUPP;
+ } else if (properties->len != data_len) {
+ pr_err("length mismatch, expected %u\n", data_len);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -EINVAL;
+ } else
+ ret = unmarshal_devices(properties);
+
+ /*
+ * Can only free the setup_data payload but not its header
+ * to avoid breaking the chain of ->next pointers.
+ */
+ data->len = 0;
+ memunmap(data);
+ memblock_free_late(pa_data + sizeof(*data), data_len);
+
+ return ret;
+ }
+ return 0;
+}
+
+fs_initcall(map_properties);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
new file mode 100644
index 000000000..3359ae2ad
--- /dev/null
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+
+#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
+#include <asm/ptdump.h>
+
+static struct ptdump_info efi_ptdump_info = {
+ .mm = &efi_mm,
+ .markers = (struct addr_marker[]){
+ { 0, "UEFI runtime start" },
+ { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
+ { -1, NULL }
+ },
+ .base_addr = 0,
+};
+
+static int __init ptdump_init(void)
+{
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+
+ return 0;
+}
+device_initcall(ptdump_init);
+
+#endif
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (ret) {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init arm_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
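+		/* Publish EFI_MEMORY_SP regions as "Soft Reserved" iomem resources */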
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+ efi_set_pgd(current->active_mm);
+ preempt_enable();
+}
+
+static int __init arm_dmi_init(void)
+{
+ /*
+ * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
+ * be called early because dmi_id_init(), which is an arch_initcall
+ * itself, depends on dmi_scan_machine() having been called already.
+ */
+ dmi_setup();
+ return 0;
+}
+core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 000000000..3e8d4b51a
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ * In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ * to cease processing data in subsequent write(2) calls until close(2)
+ * is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+ while (cap_info->index > 0)
+ __free_page(cap_info->pages[--cap_info->index]);
+
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+int __efi_capsule_setup_info(struct capsule_info *cap_info)
+{
+ size_t pages_needed;
+ int ret;
+ void *temp_page;
+
+ pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
+
+ if (pages_needed == 0) {
+ pr_err("invalid capsule size\n");
+ return -EINVAL;
+ }
+
+ /* Check if the capsule binary is supported */
+ ret = efi_capsule_supported(cap_info->header.guid,
+ cap_info->header.flags,
+ cap_info->header.imagesize,
+ &cap_info->reset_type);
+ if (ret) {
+ pr_err("capsule not supported\n");
+ return ret;
+ }
+
+ temp_page = krealloc(cap_info->pages,
+ pages_needed * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->pages = temp_page;
+
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
+ return 0;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the efi capsule header from the binary and
+ * set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: a mapped first page buffer pointer
+ * @hdr_bytes: the total number of bytes received so far for the efi header
+ *
+ * Platforms with non-standard capsule update mechanisms can override
+ * this __weak function so they can perform any required capsule
+ * image munging. See quark_quirk_function() for an example.
+ **/
+int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes)
+{
+ /* Only process data block that is larger than efi header size */
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+ cap_info->total_size = cap_info->header.imagesize;
+
+ return __efi_capsule_setup_info(cap_info);
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update() API once the
+ * binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+ bool do_vunmap = false;
+ int ret;
+
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
+ if (ret) {
+ pr_err("capsule update failed\n");
+ return ret;
+ }
+
+ /* Indicate capsule binary uploading is done */
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+
+ if (cap_info->header.flags & EFI_CAPSULE_PERSIST_ACROSS_RESET) {
+ pr_info("Successfully uploaded capsule file with reboot type '%s'\n",
+ !cap_info->reset_type ? "RESET_COLD" :
+ cap_info->reset_type == 1 ? "RESET_WARM" :
+ "RESET_SHUTDOWN");
+ } else {
+ pr_info("Successfully processed capsule file\n");
+ }
+
+ return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ * efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ * Expectation:
+ * - A user space tool should start at the beginning of capsule binary and
+ * pass data in sequentially.
+ * - Users should close and re-open this file node in order to upload more
+ * capsules.
+ * - After an error is returned, the user should close the file and restart
+ * the operation for the next try; otherwise -EIO will be returned until
+ * the file is closed.
+ * - An EFI capsule header must be located at the beginning of capsule
+ * binary file and passed in as first block data of write operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+ size_t count, loff_t *offp)
+{
+ int ret;
+ struct capsule_info *cap_info = file->private_data;
+ struct page *page;
+ void *kbuff = NULL;
+ size_t write_byte;
+
+ if (count == 0)
+ return 0;
+
+ /* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+ if (cap_info->index < 0)
+ return -EIO;
+
+ /* Only alloc a new page when previous page is full */
+ if (!cap_info->page_bytes_remain) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
+ cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
+ } else {
+ page = cap_info->pages[cap_info->index - 1];
+ }
+
+ kbuff = kmap(page);
+ kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+ /* Copy capsule binary data from user space to kernel space buffer */
+ write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+ if (copy_from_user(kbuff, buff, write_byte)) {
+ ret = -EFAULT;
+ goto fail_unmap;
+ }
+ cap_info->page_bytes_remain -= write_byte;
+
+ /* Setup capsule binary info structure */
+ if (cap_info->header.headersize == 0) {
+ ret = efi_capsule_setup_info(cap_info, kbuff - cap_info->count,
+ cap_info->count + write_byte);
+ if (ret)
+ goto fail_unmap;
+ }
+
+ cap_info->count += write_byte;
+ kunmap(page);
+
+ /* Submit the full binary to efi_capsule_update() API */
+ if (cap_info->header.headersize > 0 &&
+ cap_info->count >= cap_info->total_size) {
+ if (cap_info->count > cap_info->total_size) {
+ pr_err("capsule upload size exceeded header defined size\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = efi_capsule_submit_update(cap_info);
+ if (ret)
+ goto failed;
+ }
+
+ return write_byte;
+
+fail_unmap:
+ kunmap(page);
+failed:
+ efi_free_all_buff_pages(cap_info);
+ return ret;
+}
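+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a userspace
+ * tool streams the capsule file into the character device registered
+ * below as "efi_capsule_loader" using sequential write() calls, and on
+ * any error it closes the fd and restarts from the beginning.
+ * (capsule_fd and buf are assumed to be set up by the tool.)
+ *
+ *	int fd = open("/dev/efi_capsule_loader", O_WRONLY);
+ *	ssize_t n;
+ *
+ *	while ((n = read(capsule_fd, buf, sizeof(buf))) > 0)
+ *		if (write(fd, buf, n) != n)
+ *			break;
+ *	close(fd);
+ */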
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ * We will not free successfully submitted pages since efi update
+ * requires data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info = file->private_data;
+
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
+ kfree(cap_info->pages);
+ kfree(cap_info->phys);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ * Allocates a separate capsule_info structure for each file open call.
+ * This allows multiple files to be open at once, so one user does not
+ * need to wait for others to finish before uploading their own capsule
+ * binary.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info;
+
+ cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+ if (!cap_info)
+ return -ENOMEM;
+
+ cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->pages) {
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ file->private_data = cap_info;
+
+ return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+ .owner = THIS_MODULE,
+ .open = efi_capsule_open,
+ .write = efi_capsule_write,
+ .release = efi_capsule_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efi_capsule_loader",
+ .fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = misc_register(&efi_capsule_misc);
+ if (ret)
+ pr_err("Unable to register capsule loader device\n");
+
+ return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+ misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 000000000..598b7800d
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+ u64 length;
+ u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to capsule_pending, efi_reset_type
+ * and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function can race with callers of efi_capsule_update(): for
+ * example, calling this function while somebody else is in
+ * efi_capsule_update() but hasn't yet reached efi_capsule_update_locked()
+ * will miss the updates to capsule_pending and efi_reset_type made once
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+ if (!capsule_pending)
+ return false;
+
+ if (reset_type)
+ *reset_type = efi_reset_type;
+
+ return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK \
+ (EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+ efi_capsule_header_t capsule;
+ efi_capsule_header_t *cap_list[] = { &capsule };
+ efi_status_t status;
+ u64 max_size;
+
+ if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+ return -EINVAL;
+
+ capsule.headersize = capsule.imagesize = sizeof(capsule);
+ memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+ capsule.flags = flags;
+
+ status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+ if (status != EFI_SUCCESS)
+ return efi_status_to_err(status);
+
+ if (size > max_size)
+ return -ENOSPC;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
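+
+/*
+ * Illustrative call sequence for an in-kernel user (sketch only; cap and
+ * phys_pages are assumed to be a mapped capsule header and its physical
+ * page array):
+ *
+ *	int reset;
+ *
+ *	if (!efi_capsule_supported(cap->guid, cap->flags, cap->imagesize,
+ *				   &reset))
+ *		ret = efi_capsule_update(cap, phys_pages);
+ */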
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+ return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
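+
+/*
+ * Worked example (assuming 4 KiB pages and the 16-byte block descriptor
+ * defined above): SGLIST_PER_PAGE = 4096 / 16 - 1 = 255, so a 1 MiB
+ * capsule spanning 256 data pages needs sg_pages_num(256) =
+ * DIV_ROUND_UP(256, 255) = 2 scatter gather list pages.
+ */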
+
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * This function must be called under capsule_mutex. It checks whether
+ * efi_reset_type conflicts with @reset, and atomically sets it and
+ * capsule_pending if a capsule was successfully sent to the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+ struct page **sg_pages, int reset)
+{
+ efi_physical_addr_t sglist_phys;
+ efi_status_t status;
+
+ lockdep_assert_held(&capsule_mutex);
+
+ /*
+ * If someone has already registered a capsule that requires a
+ * different reset type, we're out of luck and must abort.
+ */
+ if (efi_reset_type >= 0 && efi_reset_type != reset) {
+ pr_err("Conflicting capsule reset type %d (%d).\n",
+ reset, efi_reset_type);
+ return -EINVAL;
+ }
+
+ /*
+ * If the system is getting ready to restart it may have
+ * called efi_capsule_pending() to make decisions (such as
+ * whether to force an EFI reboot), and we're racing against
+ * that call. Abort in that case.
+ */
+ if (unlikely(stop_capsules)) {
+ pr_warn("Capsule update raced with reboot, aborting.\n");
+ return -EINVAL;
+ }
+
+ sglist_phys = page_to_phys(sg_pages[0]);
+
+ status = efi.update_capsule(&capsule, 1, sglist_phys);
+ if (status == EFI_SUCCESS) {
+ capsule_pending = true;
+ efi_reset_type = reset;
+ }
+
+ return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * An efi_capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
+ * the capsule guid, users will likely want to check that
+ * efi_capsule_supported() succeeds before calling this function
+ * because it makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
+{
+ u32 imagesize = capsule->imagesize;
+ efi_guid_t guid = capsule->guid;
+ unsigned int count, sg_count;
+ u32 flags = capsule->flags;
+ struct page **sg_pages;
+ int rv, reset_type;
+ int i, j;
+
+ rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+ if (rv)
+ return rv;
+
+ count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+ sg_count = sg_pages_num(count);
+
+ sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
+ if (!sg_pages)
+ return -ENOMEM;
+
+ for (i = 0; i < sg_count; i++) {
+ sg_pages[i] = alloc_page(GFP_KERNEL);
+ if (!sg_pages[i]) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ efi_capsule_block_desc_t *sglist;
+
+ sglist = kmap(sg_pages[i]);
+
+ for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+ u64 sz = min_t(u64, imagesize,
+ PAGE_SIZE - (u64)*pages % PAGE_SIZE);
+
+ sglist[j].length = sz;
+ sglist[j].data = *pages++;
+
+ imagesize -= sz;
+ count--;
+ }
+
+ /* Continuation pointer */
+ sglist[j].length = 0;
+
+ if (i + 1 == sg_count)
+ sglist[j].data = 0;
+ else
+ sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+ kunmap(sg_pages[i]);
+ }
+
+ mutex_lock(&capsule_mutex);
+ rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+ mutex_unlock(&capsule_mutex);
+
+out:
+ for (i = 0; rv && i < sg_count; i++) {
+ if (sg_pages[i])
+ __free_page(sg_pages[i]);
+ }
+
+ kfree(sg_pages);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
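+
+/*
+ * Layout sketch of the scatter gather chain built above (illustrative):
+ * each sg page holds up to SGLIST_PER_PAGE data descriptors followed by
+ * one continuation descriptor whose length is zero; the continuation of
+ * the last page has a zero address to terminate the chain:
+ *
+ *	sg page 0: [len,data] ... [len,data] [0, phys of sg page 1]
+ *	sg page 1: [len,data] ... [len,data] [0, 0]
+ */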
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+ mutex_lock(&capsule_mutex);
+ stop_capsules = true;
+ mutex_unlock(&capsule_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+ .notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+ return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 000000000..36d3b8b9d
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s ", newpfx);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
new file mode 100644
index 000000000..2531de49f
--- /dev/null
+++ b/drivers/firmware/efi/cper-x86.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Advanced Micro Devices, Inc.
+
+#include <linux/cper.h>
+
+/*
+ * We don't need a "CPER_IA" prefix since these are all locally defined.
+ * This will save us a lot of line space.
+ */
+#define VALID_LAPIC_ID BIT_ULL(0)
+#define VALID_CPUID_INFO BIT_ULL(1)
+#define VALID_PROC_ERR_INFO_NUM(bits) (((bits) & GENMASK_ULL(7, 2)) >> 2)
+#define VALID_PROC_CXT_INFO_NUM(bits) (((bits) & GENMASK_ULL(13, 8)) >> 8)
+
+#define INFO_ERR_STRUCT_TYPE_CACHE \
+ GUID_INIT(0xA55701F5, 0xE3EF, 0x43DE, 0xAC, 0x72, 0x24, 0x9B, \
+ 0x57, 0x3F, 0xAD, 0x2C)
+#define INFO_ERR_STRUCT_TYPE_TLB \
+ GUID_INIT(0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B, \
+ 0x9A, 0xDB, 0x63, 0xC3)
+#define INFO_ERR_STRUCT_TYPE_BUS \
+ GUID_INIT(0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF, \
+ 0x92, 0xFF, 0xA6, 0x3C)
+#define INFO_ERR_STRUCT_TYPE_MS \
+ GUID_INIT(0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5, \
+ 0xB0, 0xA7, 0x43, 0x14)
+
+#define INFO_VALID_CHECK_INFO BIT_ULL(0)
+#define INFO_VALID_TARGET_ID BIT_ULL(1)
+#define INFO_VALID_REQUESTOR_ID BIT_ULL(2)
+#define INFO_VALID_RESPONDER_ID BIT_ULL(3)
+#define INFO_VALID_IP BIT_ULL(4)
+
+#define CHECK_VALID_TRANS_TYPE BIT_ULL(0)
+#define CHECK_VALID_OPERATION BIT_ULL(1)
+#define CHECK_VALID_LEVEL BIT_ULL(2)
+#define CHECK_VALID_PCC BIT_ULL(3)
+#define CHECK_VALID_UNCORRECTED BIT_ULL(4)
+#define CHECK_VALID_PRECISE_IP BIT_ULL(5)
+#define CHECK_VALID_RESTARTABLE_IP BIT_ULL(6)
+#define CHECK_VALID_OVERFLOW BIT_ULL(7)
+
+#define CHECK_VALID_BUS_PART_TYPE BIT_ULL(8)
+#define CHECK_VALID_BUS_TIME_OUT BIT_ULL(9)
+#define CHECK_VALID_BUS_ADDR_SPACE BIT_ULL(10)
+
+#define CHECK_VALID_BITS(check) (((check) & GENMASK_ULL(15, 0)))
+#define CHECK_TRANS_TYPE(check) (((check) & GENMASK_ULL(17, 16)) >> 16)
+#define CHECK_OPERATION(check) (((check) & GENMASK_ULL(21, 18)) >> 18)
+#define CHECK_LEVEL(check) (((check) & GENMASK_ULL(24, 22)) >> 22)
+#define CHECK_PCC BIT_ULL(25)
+#define CHECK_UNCORRECTED BIT_ULL(26)
+#define CHECK_PRECISE_IP BIT_ULL(27)
+#define CHECK_RESTARTABLE_IP BIT_ULL(28)
+#define CHECK_OVERFLOW BIT_ULL(29)
+
+#define CHECK_BUS_PART_TYPE(check) (((check) & GENMASK_ULL(31, 30)) >> 30)
+#define CHECK_BUS_TIME_OUT BIT_ULL(32)
+#define CHECK_BUS_ADDR_SPACE(check) (((check) & GENMASK_ULL(34, 33)) >> 33)
+
+#define CHECK_VALID_MS_ERR_TYPE BIT_ULL(0)
+#define CHECK_VALID_MS_PCC BIT_ULL(1)
+#define CHECK_VALID_MS_UNCORRECTED BIT_ULL(2)
+#define CHECK_VALID_MS_PRECISE_IP BIT_ULL(3)
+#define CHECK_VALID_MS_RESTARTABLE_IP BIT_ULL(4)
+#define CHECK_VALID_MS_OVERFLOW BIT_ULL(5)
+
+#define CHECK_MS_ERR_TYPE(check) (((check) & GENMASK_ULL(18, 16)) >> 16)
+#define CHECK_MS_PCC BIT_ULL(19)
+#define CHECK_MS_UNCORRECTED BIT_ULL(20)
+#define CHECK_MS_PRECISE_IP BIT_ULL(21)
+#define CHECK_MS_RESTARTABLE_IP BIT_ULL(22)
+#define CHECK_MS_OVERFLOW BIT_ULL(23)
+
+#define CTX_TYPE_MSR 1
+#define CTX_TYPE_MMREG 7
+
+enum err_types {
+ ERR_TYPE_CACHE = 0,
+ ERR_TYPE_TLB,
+ ERR_TYPE_BUS,
+ ERR_TYPE_MS,
+ N_ERR_TYPES
+};
+
+static enum err_types cper_get_err_type(const guid_t *err_type)
+{
+ if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_CACHE))
+ return ERR_TYPE_CACHE;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_TLB))
+ return ERR_TYPE_TLB;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_BUS))
+ return ERR_TYPE_BUS;
+ else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_MS))
+ return ERR_TYPE_MS;
+ else
+ return N_ERR_TYPES;
+}
+
+static const char * const ia_check_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const ia_check_op_strs[] = {
+ "generic error",
+ "generic read",
+ "generic write",
+ "data read",
+ "data write",
+ "instruction fetch",
+ "prefetch",
+ "eviction",
+ "snoop",
+};
+
+static const char * const ia_check_bus_part_type_strs[] = {
+ "Local Processor originated request",
+ "Local Processor responded to request",
+ "Local Processor observed",
+ "Generic",
+};
+
+static const char * const ia_check_bus_addr_space_strs[] = {
+ "Memory Access",
+ "Reserved",
+ "I/O",
+ "Other Transaction",
+};
+
+static const char * const ia_check_ms_error_type_strs[] = {
+ "No Error",
+ "Unclassified",
+ "Microcode ROM Parity Error",
+ "External Error",
+ "FRC Error",
+ "Internal Unclassified",
+};
+
+static const char * const ia_reg_ctx_strs[] = {
+ "Unclassified Data",
+ "MSR Registers (Machine Check and other MSRs)",
+ "32-bit Mode Execution Context",
+ "64-bit Mode Execution Context",
+ "FXSAVE Context",
+ "32-bit Mode Debug Registers (DR0-DR7)",
+ "64-bit Mode Debug Registers (DR0-DR7)",
+ "Memory Mapped Registers",
+};
+
+static inline void print_bool(char *str, const char *pfx, u64 check, u64 bit)
+{
+ printk("%s%s: %s\n", pfx, str, (check & bit) ? "true" : "false");
+}
+
+static void print_err_info_ms(const char *pfx, u16 validation_bits, u64 check)
+{
+ if (validation_bits & CHECK_VALID_MS_ERR_TYPE) {
+ u8 err_type = CHECK_MS_ERR_TYPE(check);
+
+ printk("%sError Type: %u, %s\n", pfx, err_type,
+ err_type < ARRAY_SIZE(ia_check_ms_error_type_strs) ?
+ ia_check_ms_error_type_strs[err_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_MS_PCC)
+ print_bool("Processor Context Corrupt", pfx, check, CHECK_MS_PCC);
+
+ if (validation_bits & CHECK_VALID_MS_UNCORRECTED)
+ print_bool("Uncorrected", pfx, check, CHECK_MS_UNCORRECTED);
+
+ if (validation_bits & CHECK_VALID_MS_PRECISE_IP)
+ print_bool("Precise IP", pfx, check, CHECK_MS_PRECISE_IP);
+
+ if (validation_bits & CHECK_VALID_MS_RESTARTABLE_IP)
+ print_bool("Restartable IP", pfx, check, CHECK_MS_RESTARTABLE_IP);
+
+ if (validation_bits & CHECK_VALID_MS_OVERFLOW)
+ print_bool("Overflow", pfx, check, CHECK_MS_OVERFLOW);
+}
+
+static void print_err_info(const char *pfx, u8 err_type, u64 check)
+{
+ u16 validation_bits = CHECK_VALID_BITS(check);
+
+ /*
+ * The MS Check structure varies a lot from the others, so use a
+ * separate function for decoding.
+ */
+ if (err_type == ERR_TYPE_MS)
+ return print_err_info_ms(pfx, validation_bits, check);
+
+ if (validation_bits & CHECK_VALID_TRANS_TYPE) {
+ u8 trans_type = CHECK_TRANS_TYPE(check);
+
+ printk("%sTransaction Type: %u, %s\n", pfx, trans_type,
+ trans_type < ARRAY_SIZE(ia_check_trans_type_strs) ?
+ ia_check_trans_type_strs[trans_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_OPERATION) {
+ u8 op = CHECK_OPERATION(check);
+
+ /*
+ * CACHE has more operation types than TLB or BUS, though the
+ * names and their order are the same.
+ */
+ u8 max_ops = (err_type == ERR_TYPE_CACHE) ? 9 : 7;
+
+ printk("%sOperation: %u, %s\n", pfx, op,
+ op < max_ops ? ia_check_op_strs[op] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_LEVEL)
+ printk("%sLevel: %llu\n", pfx, CHECK_LEVEL(check));
+
+ if (validation_bits & CHECK_VALID_PCC)
+ print_bool("Processor Context Corrupt", pfx, check, CHECK_PCC);
+
+ if (validation_bits & CHECK_VALID_UNCORRECTED)
+ print_bool("Uncorrected", pfx, check, CHECK_UNCORRECTED);
+
+ if (validation_bits & CHECK_VALID_PRECISE_IP)
+ print_bool("Precise IP", pfx, check, CHECK_PRECISE_IP);
+
+ if (validation_bits & CHECK_VALID_RESTARTABLE_IP)
+ print_bool("Restartable IP", pfx, check, CHECK_RESTARTABLE_IP);
+
+ if (validation_bits & CHECK_VALID_OVERFLOW)
+ print_bool("Overflow", pfx, check, CHECK_OVERFLOW);
+
+ if (err_type != ERR_TYPE_BUS)
+ return;
+
+ if (validation_bits & CHECK_VALID_BUS_PART_TYPE) {
+ u8 part_type = CHECK_BUS_PART_TYPE(check);
+
+ printk("%sParticipation Type: %u, %s\n", pfx, part_type,
+ part_type < ARRAY_SIZE(ia_check_bus_part_type_strs) ?
+ ia_check_bus_part_type_strs[part_type] : "unknown");
+ }
+
+ if (validation_bits & CHECK_VALID_BUS_TIME_OUT)
+ print_bool("Time Out", pfx, check, CHECK_BUS_TIME_OUT);
+
+ if (validation_bits & CHECK_VALID_BUS_ADDR_SPACE) {
+ u8 addr_space = CHECK_BUS_ADDR_SPACE(check);
+
+ printk("%sAddress Space: %u, %s\n", pfx, addr_space,
+ addr_space < ARRAY_SIZE(ia_check_bus_addr_space_strs) ?
+ ia_check_bus_addr_space_strs[addr_space] : "unknown");
+ }
+}
+
+void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
+{
+ int i;
+ struct cper_ia_err_info *err_info;
+ struct cper_ia_proc_ctx *ctx_info;
+ char newpfx[64], infopfx[64];
+ u8 err_type;
+
+ if (proc->validation_bits & VALID_LAPIC_ID)
+ printk("%sLocal APIC_ID: 0x%llx\n", pfx, proc->lapic_id);
+
+ if (proc->validation_bits & VALID_CPUID_INFO) {
+ printk("%sCPUID Info:\n", pfx);
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, proc->cpuid,
+ sizeof(proc->cpuid), 0);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ err_info = (struct cper_ia_err_info *)(proc + 1);
+ for (i = 0; i < VALID_PROC_ERR_INFO_NUM(proc->validation_bits); i++) {
+ printk("%sError Information Structure %d:\n", pfx, i);
+
+ err_type = cper_get_err_type(&err_info->err_type);
+ printk("%sError Structure Type: %s\n", newpfx,
+ err_type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_type] : "unknown");
+
+ if (err_type >= N_ERR_TYPES) {
+ printk("%sError Structure Type: %pUl\n", newpfx,
+ &err_info->err_type);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_CHECK_INFO) {
+ printk("%sCheck Information: 0x%016llx\n", newpfx,
+ err_info->check_info);
+
+ if (err_type < N_ERR_TYPES) {
+ snprintf(infopfx, sizeof(infopfx), "%s ",
+ newpfx);
+
+ print_err_info(infopfx, err_type,
+ err_info->check_info);
+ }
+ }
+
+ if (err_info->validation_bits & INFO_VALID_TARGET_ID) {
+ printk("%sTarget Identifier: 0x%016llx\n",
+ newpfx, err_info->target_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_REQUESTOR_ID) {
+ printk("%sRequestor Identifier: 0x%016llx\n",
+ newpfx, err_info->requestor_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_RESPONDER_ID) {
+ printk("%sResponder Identifier: 0x%016llx\n",
+ newpfx, err_info->responder_id);
+ }
+
+ if (err_info->validation_bits & INFO_VALID_IP) {
+ printk("%sInstruction Pointer: 0x%016llx\n",
+ newpfx, err_info->ip);
+ }
+
+ err_info++;
+ }
+
+ ctx_info = (struct cper_ia_proc_ctx *)err_info;
+ for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
+ int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int groupsize = 4;
+
+ printk("%sContext Information Structure %d:\n", pfx, i);
+
+ printk("%sRegister Context Type: %s\n", newpfx,
+ ctx_info->reg_ctx_type < ARRAY_SIZE(ia_reg_ctx_strs) ?
+ ia_reg_ctx_strs[ctx_info->reg_ctx_type] : "unknown");
+
+ printk("%sRegister Array Size: 0x%04x\n", newpfx,
+ ctx_info->reg_arr_size);
+
+ if (ctx_info->reg_ctx_type == CTX_TYPE_MSR) {
+ groupsize = 8; /* MSRs are 8 bytes wide. */
+ printk("%sMSR Address: 0x%08x\n", newpfx,
+ ctx_info->msr_addr);
+ }
+
+ if (ctx_info->reg_ctx_type == CTX_TYPE_MMREG) {
+ printk("%sMM Register Address: 0x%016llx\n", newpfx,
+ ctx_info->mm_reg_addr);
+ }
+
+ printk("%sRegister Array:\n", newpfx);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, groupsize,
+ (ctx_info + 1), ctx_info->reg_arr_size, 0);
+
+ ctx_info = (struct cper_ia_proc_ctx *)((long)ctx_info + size);
+ }
+}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
new file mode 100644
index 000000000..232c092c4
--- /dev/null
+++ b/drivers/firmware/efi/cper.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+/*
+ * CPER record IDs need to be unique even across reboots, because the
+ * record ID is used as the index for ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq)) {
+ time64_t time = ktime_get_real_seconds();
+
+ /*
+ * This code is unlikely to still be needed in year 2106,
+ * but just in case, let's use a few more bits for timestamps
+ * after y2038 to be sure they keep increasing monotonically
+ * for the next few hundred years...
+ */
+ if (time < 0x80000000)
+ atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
+ else
+ atomic64_set(&seq, 0x8000000000000000ull |
+ ktime_get_real_seconds() << 24);
+ }
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
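+
+/*
+ * Worked example (illustrative): if the first call after boot happens at
+ * wall-clock time T with T < 0x80000000 (before 2038), the sequence is
+ * seeded with T << 32, so the first record ID is (T << 32) + 1 and later
+ * IDs simply increment the low bits; after 2038 the seed becomes
+ * 0x8000000000000000 | (T << 24) instead.
+ */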
+
+static const char * const severity_strs[] = {
+ "recoverable",
+ "fatal",
+ "corrected",
+ "info",
+};
+
+const char *cper_severity_str(unsigned int severity)
+{
+ return severity < ARRAY_SIZE(severity_strs) ?
+ severity_strs[severity] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_severity_str);
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: size of the string array: @strs
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output is longer than 80 characters, multiple lines will be
+ * printed, with @pfx printed at the beginning of each line.
+ */
+void cper_print_bits(const char *pfx, unsigned int bits,
+ const char * const strs[], unsigned int strs_size)
+{
+ int i, len = 0;
+ const char *str;
+ char buf[84];
+
+ for (i = 0; i < strs_size; i++) {
+ if (!(bits & (1U << i)))
+ continue;
+ str = strs[i];
+ if (!str)
+ continue;
+ if (len && len + strlen(str) + 2 > 80) {
+ printk("%s\n", buf);
+ len = 0;
+ }
+ if (!len)
+ len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+ else
+ len += scnprintf(buf+len, sizeof(buf)-len, ", %s", str);
+ }
+ if (len)
+ printk("%s\n", buf);
+}
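+
+/*
+ * Example (illustrative): with bits == 0x5 and
+ * strs == { "restartable", "precise IP", "overflow" }, bits 0 and 2 are
+ * set, so a single line "<pfx>restartable, overflow" is printed.
+ */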
+
+static const char * const proc_type_strs[] = {
+ "IA32/X64",
+ "IA64",
+ "ARM",
+};
+
+static const char * const proc_isa_strs[] = {
+ "IA32",
+ "IA64",
+ "X64",
+ "ARM A32/T32",
+ "ARM A64",
+};
+
+const char * const cper_proc_error_type_strs[] = {
+ "cache error",
+ "TLB error",
+ "bus error",
+ "micro-architectural error",
+};
+
+static const char * const proc_op_strs[] = {
+ "unknown or generic",
+ "data read",
+ "data write",
+ "instruction execution",
+};
+
+static const char * const proc_flag_strs[] = {
+ "restartable",
+ "precise IP",
+ "overflow",
+ "corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+ const struct cper_sec_proc_generic *proc)
+{
+ if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+ printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+ proc->proc_type < ARRAY_SIZE(proc_type_strs) ?
+ proc_type_strs[proc->proc_type] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ISA)
+ printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+ proc->proc_isa < ARRAY_SIZE(proc_isa_strs) ?
+ proc_isa_strs[proc->proc_isa] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+ printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+ cper_print_bits(pfx, proc->proc_error_type,
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+ printk("%s""operation: %d, %s\n", pfx, proc->operation,
+ proc->operation < ARRAY_SIZE(proc_op_strs) ?
+ proc_op_strs[proc->operation] : "unknown");
+ if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+ printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+ cper_print_bits(pfx, proc->flags, proc_flag_strs,
+ ARRAY_SIZE(proc_flag_strs));
+ }
+ if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+ printk("%s""level: %d\n", pfx, proc->level);
+ if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+ printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+ if (proc->validation_bits & CPER_PROC_VALID_ID)
+ printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+ if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+ printk("%s""target_address: 0x%016llx\n",
+ pfx, proc->target_addr);
+ if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+ printk("%s""requestor_id: 0x%016llx\n",
+ pfx, proc->requestor_id);
+ if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+ printk("%s""responder_id: 0x%016llx\n",
+ pfx, proc->responder_id);
+ if (proc->validation_bits & CPER_PROC_VALID_IP)
+ printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char * const mem_err_type_strs[] = {
+ "unknown",
+ "no error",
+ "single-bit ECC",
+ "multi-bit ECC",
+ "single-symbol chipkill ECC",
+ "multi-symbol chipkill ECC",
+ "master abort",
+ "target abort",
+ "parity error",
+ "watchdog timeout",
+ "invalid address",
+ "mirror Broken",
+ "memory sparing",
+ "scrub corrected error",
+ "scrub uncorrected error",
+ "physical memory map-out event",
+};
+
+const char *cper_mem_err_type_str(unsigned int etype)
+{
+ return etype < ARRAY_SIZE(mem_err_type_strs) ?
+ mem_err_type_strs[etype] : "unknown";
+}
+EXPORT_SYMBOL_GPL(cper_mem_err_type_str);
+
+static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+ u32 len, n;
+
+ if (!msg)
+ return 0;
+
+ n = 0;
+ len = CPER_REC_LEN - 1;
+ if (mem->validation_bits & CPER_MEM_VALID_NODE)
+ n += scnprintf(msg + n, len - n, "node: %d ", mem->node);
+ if (mem->validation_bits & CPER_MEM_VALID_CARD)
+ n += scnprintf(msg + n, len - n, "card: %d ", mem->card);
+ if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+ n += scnprintf(msg + n, len - n, "module: %d ", mem->module);
+ if (mem->validation_bits & CPER_MEM_VALID_RANK_NUMBER)
+ n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK)
+ n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP)
+ n += scnprintf(msg + n, len - n, "bank_group: %d ",
+ mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
+ n += scnprintf(msg + n, len - n, "bank_address: %d ",
+ mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
+ if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+ n += scnprintf(msg + n, len - n, "device: %d ", mem->device);
+ if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
+ u32 row = mem->row;
+
+ row |= cper_get_mem_extension(mem->validation_bits, mem->extended);
+ n += scnprintf(msg + n, len - n, "row: %d ", row);
+ }
+ if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+ n += scnprintf(msg + n, len - n, "column: %d ", mem->column);
+ if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+ n += scnprintf(msg + n, len - n, "bit_position: %d ",
+ mem->bit_pos);
+ if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+ n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ",
+ mem->requestor_id);
+ if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+ n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ",
+ mem->responder_id);
+ if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+ scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
+ mem->target_id);
+ if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
+ scnprintf(msg + n, len - n, "chip_id: %d ",
+ mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
+
+ msg[n] = '\0';
+ return n;
+}
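+
+/*
+ * Example (illustrative): for a record with only the node, card and
+ * module validation bits set and values 0, 1 and 2, the resulting
+ * location string is "node: 0 card: 1 module: 2 ".
+ */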
+
+static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+{
+ u32 len, n;
+ const char *bank = NULL, *device = NULL;
+
+ if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ return 0;
+
+ len = CPER_REC_LEN;
+ dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ if (bank && device)
+ n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+ else
+ n = snprintf(msg, len,
+ "DIMM location: not present. DMI handle: 0x%.4x ",
+ mem->mem_dev_handle);
+
+ return n;
+}
+
+void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
+ struct cper_mem_err_compact *cmem)
+{
+ cmem->validation_bits = mem->validation_bits;
+ cmem->node = mem->node;
+ cmem->card = mem->card;
+ cmem->module = mem->module;
+ cmem->bank = mem->bank;
+ cmem->device = mem->device;
+ cmem->row = mem->row;
+ cmem->column = mem->column;
+ cmem->bit_pos = mem->bit_pos;
+ cmem->requestor_id = mem->requestor_id;
+ cmem->responder_id = mem->responder_id;
+ cmem->target_id = mem->target_id;
+ cmem->extended = mem->extended;
+ cmem->rank = mem->rank;
+ cmem->mem_array_handle = mem->mem_array_handle;
+ cmem->mem_dev_handle = mem->mem_dev_handle;
+}
+
+const char *cper_mem_err_unpack(struct trace_seq *p,
+ struct cper_mem_err_compact *cmem)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ char rcd_decode_str[CPER_REC_LEN];
+
+ if (cper_mem_err_location(cmem, rcd_decode_str))
+ trace_seq_printf(p, "%s", rcd_decode_str);
+ if (cper_dimm_err_location(cmem, rcd_decode_str))
+ trace_seq_printf(p, "%s", rcd_decode_str);
+ trace_seq_putc(p, '\0');
+
+ return ret;
+}
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+ int len)
+{
+ struct cper_mem_err_compact cmem;
+ char rcd_decode_str[CPER_REC_LEN];
+
+ /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+ if (len == sizeof(struct cper_sec_mem_err_old) &&
+ (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+ pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+ return;
+ }
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+ printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+ if (mem->validation_bits & CPER_MEM_VALID_PA)
+ printk("%s""physical_address: 0x%016llx\n",
+ pfx, mem->physical_addr);
+ if (mem->validation_bits & CPER_MEM_VALID_PA_MASK)
+ printk("%s""physical_address_mask: 0x%016llx\n",
+ pfx, mem->physical_addr_mask);
+ cper_mem_err_pack(mem, &cmem);
+ if (cper_mem_err_location(&cmem, rcd_decode_str))
+ printk("%s%s\n", pfx, rcd_decode_str);
+ if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+ u8 etype = mem->error_type;
+ printk("%s""error_type: %d, %s\n", pfx, etype,
+ cper_mem_err_type_str(etype));
+ }
+ if (cper_dimm_err_location(&cmem, rcd_decode_str))
+ printk("%s%s\n", pfx, rcd_decode_str);
+}
+
+static const char * const pcie_port_type_strs[] = {
+ "PCIe end point",
+ "legacy PCI end point",
+ "unknown",
+ "unknown",
+ "root port",
+ "upstream switch port",
+ "downstream switch port",
+ "PCIe to PCI/PCI-X bridge",
+ "PCI/PCI-X to PCIe bridge",
+ "root complex integrated endpoint device",
+ "root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ const struct acpi_hest_generic_data *gdata)
+{
+ if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+ printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+ pcie->port_type < ARRAY_SIZE(pcie_port_type_strs) ?
+ pcie_port_type_strs[pcie->port_type] : "unknown");
+ if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+ printk("%s""version: %d.%d\n", pfx,
+ pcie->version.major, pcie->version.minor);
+ if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+ printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+ pcie->command, pcie->status);
+ if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+ const __u8 *p;
+ printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+ pcie->device_id.segment, pcie->device_id.bus,
+ pcie->device_id.device, pcie->device_id.function);
+ printk("%s""slot: %d\n", pfx,
+ pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+ printk("%s""secondary_bus: 0x%02x\n", pfx,
+ pcie->device_id.secondary_bus);
+ printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+ pcie->device_id.vendor_id, pcie->device_id.device_id);
+ p = pcie->device_id.class_code;
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
+ }
+ if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+ printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+ pcie->serial_number.lower, pcie->serial_number.upper);
+ if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+ printk(
+ "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+ pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+ /* Fatal errors call __ghes_panic() before AER handler prints this */
+ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+ (gdata->error_severity & CPER_SEV_FATAL)) {
+ struct aer_capability_regs *aer;
+
+ aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+ pfx, aer->uncor_status, aer->uncor_mask);
+ printk("%saer_uncor_severity: 0x%08x\n",
+ pfx, aer->uncor_severity);
+ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+ aer->header_log.dw0, aer->header_log.dw1,
+ aer->header_log.dw2, aer->header_log.dw3);
+ }
+}
+
+static const char * const fw_err_rec_type_strs[] = {
+ "IPF SAL Error Record",
+ "SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
+ "SOC Firmware Error Record Type2",
+};
+
+static void cper_print_fw_err(const char *pfx,
+ struct acpi_hest_generic_data *gdata,
+ const struct cper_sec_fw_err_rec_ref *fw_err)
+{
+ void *buf = acpi_hest_get_payload(gdata);
+ u32 offset, length = gdata->error_data_length;
+
+ printk("%s""Firmware Error Record Type: %s\n", pfx,
+ fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
+ fw_err_rec_type_strs[fw_err->record_type] : "unknown");
+ printk("%s""Revision: %d\n", pfx, fw_err->revision);
+
+ /* Record Type based on UEFI 2.7 */
+ if (fw_err->revision == 0) {
+ printk("%s""Record Identifier: %08llx\n", pfx,
+ fw_err->record_identifier);
+ } else if (fw_err->revision == 2) {
+ printk("%s""Record Identifier: %pUl\n", pfx,
+ &fw_err->record_identifier_guid);
+ }
+
+ /*
+ * The FW error record may contain trailing data beyond the
+ * structure defined by the specification. As the fields
+ * defined (and hence the offset of any trailing data) vary
+ * with the revision, set the offset to account for this
+ * variation.
+ */
+ if (fw_err->revision == 0) {
+ /* record_identifier_guid not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier_guid);
+ } else if (fw_err->revision == 1) {
+ /* record_identifier not defined */
+ offset = offsetof(struct cper_sec_fw_err_rec_ref,
+ record_identifier);
+ } else {
+ offset = sizeof(*fw_err);
+ }
+
+ buf += offset;
+ length -= offset;
+
+ print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
+}
+
+static void cper_print_tstamp(const char *pfx,
+ struct acpi_hest_generic_data_v300 *gdata)
+{
+ __u8 hour, min, sec, day, mon, year, century, *timestamp;
+
+ if (gdata->validation_bits & ACPI_HEST_GEN_VALID_TIMESTAMP) {
+ timestamp = (__u8 *)&(gdata->time_stamp);
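+ /*
+ * The timestamp bytes are BCD encoded in the order sec, min, hour,
+ * flags, day, mon, year, century; bit 0 of the flags byte marks the
+ * timestamp as precise.
+ */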
+ sec = bcd2bin(timestamp[0]);
+ min = bcd2bin(timestamp[1]);
+ hour = bcd2bin(timestamp[2]);
+ day = bcd2bin(timestamp[4]);
+ mon = bcd2bin(timestamp[5]);
+ year = bcd2bin(timestamp[6]);
+ century = bcd2bin(timestamp[7]);
+
+ printk("%s%ststamp: %02d%02d-%02d-%02d %02d:%02d:%02d\n", pfx,
+ (timestamp[3] & 0x1 ? "precise " : "imprecise "),
+ century, year, mon, day, hour, min, sec);
+ }
+}
+
+static void
+cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
+ int sec_no)
+{
+ guid_t *sec_type = (guid_t *)gdata->section_type;
+ __u16 severity;
+ char newpfx[64];
+
+ if (acpi_hest_get_version(gdata) >= 3)
+ cper_print_tstamp(pfx, (struct acpi_hest_generic_data_v300 *)gdata);
+
+ severity = gdata->error_severity;
+ printk("%s""Error %d, type: %s\n", pfx, sec_no,
+ cper_severity_str(severity));
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+ printk("%s""fru_id: %pUl\n", pfx, gdata->fru_id);
+ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+ printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+ if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
+ struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: general processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*proc_err))
+ cper_print_proc_generic(newpfx, proc_err);
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: memory error\n", newpfx);
+ if (gdata->error_data_length >=
+ sizeof(struct cper_sec_mem_err_old))
+ cper_print_mem(newpfx, mem_err,
+ gdata->error_data_length);
+ else
+ goto err_section_too_small;
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie = acpi_hest_get_payload(gdata);
+
+ printk("%s""section_type: PCIe error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*pcie))
+ cper_print_pcie(newpfx, pcie, gdata);
+ else
+ goto err_section_too_small;
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: ARM processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*arm_err))
+ cper_print_proc_arm(newpfx, arm_err);
+ else
+ goto err_section_too_small;
+#endif
+#if defined(CONFIG_UEFI_CPER_X86)
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ struct cper_sec_proc_ia *ia_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: IA32/X64 processor error\n", newpfx);
+ if (gdata->error_data_length >= sizeof(*ia_err))
+ cper_print_proc_ia(newpfx, ia_err);
+ else
+ goto err_section_too_small;
+#endif
+ } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
+ struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection_type: Firmware Error Record Reference\n",
+ newpfx);
+ /* The minimal FW Error Record contains 16 bytes */
+ if (gdata->error_data_length >= SZ_16)
+ cper_print_fw_err(newpfx, gdata, fw_err);
+ else
+ goto err_section_too_small;
+ } else {
+ const void *err = acpi_hest_get_payload(gdata);
+
+ printk("%ssection type: unknown, %pUl\n", newpfx, sec_type);
+ printk("%ssection length: %#x\n", newpfx,
+ gdata->error_data_length);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, err,
+ gdata->error_data_length, true);
+ }
+
+ return;
+
+err_section_too_small:
+ pr_err(FW_WARN "error section length is too small\n");
+}
+
+void cper_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ int sec_no = 0;
+ char newpfx[64];
+ __u16 severity;
+
+ severity = estatus->error_severity;
+ if (severity == CPER_SEV_CORRECTED)
+ printk("%s%s\n", pfx,
+ "It has been corrected by h/w "
+ "and requires no further action");
+ printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
+ snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ apei_estatus_for_each_section(estatus, gdata) {
+ cper_estatus_print_section(newpfx, gdata, sec_no);
+ sec_no++;
+ }
+}
+EXPORT_SYMBOL_GPL(cper_estatus_print);
+
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check_header);
+
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, record_size;
+ int rc;
+
+ rc = cper_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+
+ data_len = estatus->data_length;
+
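+ /* Walk each section, verifying it fits in the advertised data length. */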
+ apei_estatus_for_each_section(estatus, gdata) {
+ if (sizeof(struct acpi_hest_generic_data) > data_len)
+ return -EINVAL;
+
+ record_size = acpi_hest_get_record_size(gdata);
+ if (record_size > data_len)
+ return -EINVAL;
+
+ data_len -= record_size;
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cper_estatus_check);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 000000000..5c9625e55
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+struct acpi_hid_uid {
+ struct acpi_device_id hid[2];
+ char uid[11]; /* UINT_MAX + null byte */
+};
+
+static int __init match_acpi_dev(struct device *dev, const void *data)
+{
+ struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data;
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ if (acpi_match_device_ids(adev, hid_uid.hid))
+ return 0;
+
+ if (adev->pnp.unique_id)
+ return !strcmp(adev->pnp.unique_id, hid_uid.uid);
+ else
+ return !strcmp("0", hid_uid.uid);
+}
+
+static long __init parse_acpi_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ struct acpi_hid_uid hid_uid = {};
+ struct device *phys_dev;
+
+ if (node->header.length != 12)
+ return -EINVAL;
+
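+ /*
+ * The ACPI _HID in the node is a compressed EISA ID: three 5-bit
+ * characters (offsets from 'A') packed into the low 15 bits, followed
+ * by a 16-bit hexadecimal product number in the upper half.
+ */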
+ sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
+ node->acpi.hid >> 16);
+ sprintf(hid_uid.uid, "%u", node->acpi.uid);
+
+ *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
+ match_acpi_dev);
+ if (!*child)
+ return -ENODEV;
+
+ phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ if (phys_dev) {
+ get_device(phys_dev);
+ put_device(*child);
+ *child = phys_dev;
+ }
+
+ return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+ unsigned int devfn = *(unsigned int *)data;
+
+ return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ unsigned int devfn;
+
+ if (node->header.length != 6)
+ return -EINVAL;
+ if (!parent)
+ return -EINVAL;
+
+ devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+ *child = device_find_child(parent, &devfn, match_pci_dev);
+ if (!*child)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
+ * parse_end_path() is supposed to return these.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
+
+static long __init parse_end_path(const struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ if (node->header.length != 4)
+ return -EINVAL;
+ if (node->header.sub_type != EFI_DEV_END_INSTANCE &&
+ node->header.sub_type != EFI_DEV_END_ENTIRE)
+ return -EINVAL;
+ if (!parent)
+ return -ENODEV;
+
+ *child = get_device(parent);
+ return node->header.sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device. If the device was found, its reference count is incremented and a
+ * pointer to it is returned. The caller needs to drop the reference with
+ * put_device() after use. The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed. Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL. Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ * // do something with dev
+ * put_device(dev);
+ * }
+ * if (IS_ERR(dev))
+ * // report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ * %ERR_PTR(-ENODEV) if no device was found,
+ * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len)
+{
+ struct device *parent = NULL, *child;
+ long ret = 0;
+
+ if (!*len)
+ return NULL;
+
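+ /*
+ * Walk the path node by node: each type-specific parser looks up the
+ * device below the current parent, and that device becomes the parent
+ * for the next node.
+ */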
+ while (!ret) {
+ if (*len < 4 || *len < (*node)->header.length)
+ ret = -EINVAL;
+ else if ((*node)->header.type == EFI_DEV_ACPI &&
+ (*node)->header.sub_type == EFI_DEV_BASIC_ACPI)
+ ret = parse_acpi_path(*node, parent, &child);
+ else if ((*node)->header.type == EFI_DEV_HW &&
+ (*node)->header.sub_type == EFI_DEV_PCI)
+ ret = parse_pci_path(*node, parent, &child);
+ else if (((*node)->header.type == EFI_DEV_END_PATH ||
+ (*node)->header.type == EFI_DEV_END_PATH2))
+ ret = parse_end_path(*node, parent, &child);
+ else
+ ret = -ENOTSUPP;
+
+ put_device(parent);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ parent = child;
+ *node = (void *)*node + (*node)->header.length;
+ *len -= (*node)->header.length;
+ }
+
+ if (ret == EFI_DEV_END_ENTIRE)
+ *len = 0;
+
+ return child;
+}
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
new file mode 100644
index 000000000..a52236e11
--- /dev/null
+++ b/drivers/firmware/efi/earlycon.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/screen_info.h>
+
+#include <asm/early_ioremap.h>
+
+static const struct console *earlycon_console __initdata;
+static const struct font_desc *font;
+static u32 efi_x, efi_y;
+static u64 fb_base;
+static bool fb_wb;
+static void *efi_fb;
+
+/*
+ * EFI earlycon needs to use early_memremap() to map the framebuffer.
+ * But early_memremap() is not usable for 'earlycon=efifb keep_bootcon';
+ * memremap() should be used instead, and it becomes available after
+ * paging_init(), which runs earlier than the initcall callbacks. Thus this
+ * early initcall, efi_earlycon_remap_fb(), maps the whole EFI framebuffer.
+ */
+static int __init efi_earlycon_remap_fb(void)
+{
+ /* bail if there is no bootconsole or it has been disabled already */
+ if (!earlycon_console || !(earlycon_console->flags & CON_ENABLED))
+ return 0;
+
+ efi_fb = memremap(fb_base, screen_info.lfb_size,
+ fb_wb ? MEMREMAP_WB : MEMREMAP_WC);
+
+ return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(efi_earlycon_remap_fb);
+
+static int __init efi_earlycon_unmap_fb(void)
+{
+ /* unmap the bootconsole fb unless keep_bootcon has left it enabled */
+ if (efi_fb && !(earlycon_console->flags & CON_ENABLED))
+ memunmap(efi_fb);
+ return 0;
+}
+late_initcall(efi_earlycon_unmap_fb);
+
+static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
+{
+ pgprot_t fb_prot;
+
+ if (efi_fb)
+ return efi_fb + start;
+
+ fb_prot = fb_wb ? PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
+ return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
+}
+
+static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
+{
+ if (efi_fb)
+ return;
+
+ early_memunmap(addr, len);
+}
+
+static void efi_earlycon_clear_scanline(unsigned int y)
+{
+ unsigned long *dst;
+ u16 len;
+
+ len = screen_info.lfb_linelength;
+ dst = efi_earlycon_map(y*len, len);
+ if (!dst)
+ return;
+
+ memset(dst, 0, len);
+ efi_earlycon_unmap(dst, len);
+}
+
+static void efi_earlycon_scroll_up(void)
+{
+ unsigned long *dst, *src;
+ u16 len;
+ u32 i, height;
+
+ len = screen_info.lfb_linelength;
+ height = screen_info.lfb_height;
+
+ for (i = 0; i < height - font->height; i++) {
+ dst = efi_earlycon_map(i*len, len);
+ if (!dst)
+ return;
+
+ src = efi_earlycon_map((i + font->height) * len, len);
+ if (!src) {
+ efi_earlycon_unmap(dst, len);
+ return;
+ }
+
+ memmove(dst, src, len);
+
+ efi_earlycon_unmap(src, len);
+ efi_earlycon_unmap(dst, len);
+ }
+}
+
+static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+ const u32 color_black = 0x00000000;
+ const u32 color_white = 0x00ffffff;
+ const u8 *src;
+ int m, n, bytes;
+ u8 x;
+
+ bytes = BITS_TO_BYTES(font->width);
+ src = font->data + c * font->height * bytes + h * bytes;
+
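+ /*
+ * Expand one horizontal scanline of the glyph: each set bit becomes
+ * a white 32bpp pixel, each clear bit a black one.
+ */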
+ for (m = 0; m < font->width; m++) {
+ n = m % 8;
+ x = *(src + m / 8);
+ if ((x >> (7 - n)) & 1)
+ *dst = color_white;
+ else
+ *dst = color_black;
+ dst++;
+ }
+}
+
+static void
+efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+{
+ struct screen_info *si;
+ unsigned int len;
+ const char *s;
+ void *dst;
+
+ si = &screen_info;
+ len = si->lfb_linelength;
+
+ while (num) {
+ unsigned int linemax;
+ unsigned int h, count = 0;
+
+ for (s = str; *s && *s != '\n'; s++) {
+ if (count == num)
+ break;
+ count++;
+ }
+
+ linemax = (si->lfb_width - efi_x) / font->width;
+ if (count > linemax)
+ count = linemax;
+
+ for (h = 0; h < font->height; h++) {
+ unsigned int n, x;
+
+ dst = efi_earlycon_map((efi_y + h) * len, len);
+ if (!dst)
+ return;
+
+ s = str;
+ n = count;
+ x = efi_x;
+
+ while (n-- > 0) {
+ efi_earlycon_write_char(dst + x*4, *s, h);
+ x += font->width;
+ s++;
+ }
+
+ efi_earlycon_unmap(dst, len);
+ }
+
+ num -= count;
+ efi_x += count * font->width;
+ str += count;
+
+ if (num > 0 && *s == '\n') {
+ efi_x = 0;
+ efi_y += font->height;
+ str++;
+ num--;
+ }
+
+ if (efi_x + font->width > si->lfb_width) {
+ efi_x = 0;
+ efi_y += font->height;
+ }
+
+ if (efi_y + font->height > si->lfb_height) {
+ u32 i;
+
+ efi_y -= font->height;
+ efi_earlycon_scroll_up();
+
+ for (i = 0; i < font->height; i++)
+ efi_earlycon_clear_scanline(efi_y + i);
+ }
+ }
+}
+
+static int __init efi_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ struct screen_info *si;
+ u16 xres, yres;
+ u32 i;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return -ENODEV;
+
+ fb_base = screen_info.lfb_base;
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)screen_info.ext_lfb_base << 32;
+
+ fb_wb = opt && !strcmp(opt, "ram");
+
+ si = &screen_info;
+ xres = si->lfb_width;
+ yres = si->lfb_height;
+
+ /*
+ * efi_earlycon_write_char() implicitly assumes a framebuffer with
+ * 32 bits per pixel.
+ */
+ if (si->lfb_depth != 32)
+ return -ENODEV;
+
+ font = get_default_font(xres, yres, -1, -1);
+ if (!font)
+ return -ENODEV;
+
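+ /*
+ * Start output on the last row that fits a full glyph, scrolling the
+ * existing framebuffer contents up by one text row first.
+ */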
+ efi_y = rounddown(yres, font->height) - font->height;
+ for (i = 0; i < (yres - efi_y) / font->height; i++)
+ efi_earlycon_scroll_up();
+
+ device->con->write = efi_earlycon_write;
+ earlycon_console = device->con;
+ return 0;
+}
+EARLYCON_DECLARE(efifb, efi_earlycon_setup);
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
new file mode 100644
index 000000000..6aafdb67d
--- /dev/null
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2012 Intel Corporation
+ * Author: Josh Triplett <josh@joshtriplett.org>
+ *
+ * Based on the bgrt driver:
+ * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
+ * Author: Matthew Garrett
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+
+struct acpi_table_bgrt bgrt_tab;
+size_t bgrt_image_size;
+
+struct bmp_header {
+ u16 id;
+ u32 size;
+} __packed;
+
+void __init efi_bgrt_init(struct acpi_table_header *table)
+{
+ void *image;
+ struct bmp_header bmp_header;
+ struct acpi_table_bgrt *bgrt = &bgrt_tab;
+
+ if (acpi_disabled)
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (table->length < sizeof(bgrt_tab)) {
+ pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
+ table->length, sizeof(bgrt_tab));
+ return;
+ }
+ *bgrt = *(struct acpi_table_bgrt *)table;
+ /*
+ * Only version 1 is defined but some older laptops (seen on Lenovo
+ * Ivy Bridge models) have a correct version 1 BGRT table with the
+ * version set to 0, so we accept version 0 and 1.
+ */
+ if (bgrt->version > 1) {
+ pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
+ bgrt->version);
+ goto out;
+ }
+ if (bgrt->image_type != 0) {
+ pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
+ bgrt->image_type);
+ goto out;
+ }
+ if (!bgrt->image_address) {
+ pr_notice("Ignoring BGRT: null image address\n");
+ goto out;
+ }
+
+ if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) {
+ pr_notice("Ignoring BGRT: invalid image address\n");
+ goto out;
+ }
+ image = early_memremap(bgrt->image_address, sizeof(bmp_header));
+ if (!image) {
+ pr_notice("Ignoring BGRT: failed to map image header memory\n");
+ goto out;
+ }
+
+ memcpy(&bmp_header, image, sizeof(bmp_header));
+ early_memunmap(image, sizeof(bmp_header));
+ if (bmp_header.id != 0x4d42) {
+ pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+ bmp_header.id);
+ goto out;
+ }
+ bgrt_image_size = bmp_header.size;
+ efi_mem_reserve(bgrt->image_address, bgrt_image_size);
+
+ return;
+out:
+ memset(bgrt, 0, sizeof(bgrt_tab));
+}
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
new file mode 100644
index 000000000..f55a92ff1
--- /dev/null
+++ b/drivers/firmware/efi/efi-init.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/efi.h>
+#include <linux/fwnode.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <asm/efi.h>
+
+static int __init is_memory(efi_memory_desc_t *md)
+{
+ if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))
+ return 1;
+ return 0;
+}
+
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t __init efi_to_phys(unsigned long addr)
+{
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ /* no virtual mapping has been installed by the stub */
+ break;
+ if (md->virt_addr <= addr &&
+ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+ return md->phys_addr + addr - md->virt_addr;
+ }
+ return addr;
+}
+
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
+
+static const efi_config_table_type_t arch_tables[] __initconst = {
+ {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+ {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
+ {}
+};
+
+static void __init init_screen_info(void)
+{
+ struct screen_info *si;
+
+ if (IS_ENABLED(CONFIG_ARM) &&
+ screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ si = early_memremap_ro(screen_info_table, sizeof(*si));
+ if (!si) {
+ pr_err("Could not map screen_info config table\n");
+ return;
+ }
+ screen_info = *si;
+ early_memunmap(si, sizeof(*si));
+
+ /* dummycon on ARM needs non-zero values for columns/lines */
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_lines = 25;
+ }
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ memblock_is_map_memory(screen_info.lfb_base))
+ memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
+static int __init uefi_init(u64 efi_system_table)
+{
+ efi_config_table_t *config_tables;
+ efi_system_table_t *systab;
+ size_t table_size;
+ int retval;
+
+ systab = early_memremap_ro(efi_system_table, sizeof(efi_system_table_t));
+ if (systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_bit(EFI_64BIT, &efi.flags);
+
+ retval = efi_systab_check_header(&systab->hdr, 2);
+ if (retval)
+ goto out;
+
+ efi.runtime = systab->runtime;
+ efi.runtime_version = systab->hdr.revision;
+
+ efi_systab_report_header(&systab->hdr, efi_to_phys(systab->fw_vendor));
+
+ table_size = sizeof(efi_config_table_t) * systab->nr_tables;
+ config_tables = early_memremap_ro(efi_to_phys(systab->tables),
+ table_size);
+ if (config_tables == NULL) {
+ pr_warn("Unable to map EFI config table array.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ retval = efi_config_parse_tables(config_tables, systab->nr_tables,
+ IS_ENABLED(CONFIG_ARM) ? arch_tables
+ : NULL);
+
+ early_memunmap(config_tables, table_size);
+out:
+ early_memunmap(systab, sizeof(efi_system_table_t));
+ return retval;
+}
+
+/*
+ * Return true for regions that can be used as System RAM.
+ */
+static __init int is_usable_memory(efi_memory_desc_t *md)
+{
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ /*
+ * Special purpose memory is 'soft reserved', which means it
+ * is set aside initially, but can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
+ * According to the spec, these regions are no longer reserved
+ * after calling ExitBootServices(). However, we can only use
+ * them as System RAM if they can be mapped writeback cacheable.
+ */
+ return (md->attribute & EFI_MEMORY_WB);
+ default:
+ break;
+ }
+ return false;
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI memory map:\n");
+
+ /*
+ * Discard memblocks discovered so far: if there are any at this
+ * point, they originate from memory nodes in the DT, and UEFI
+ * uses its own memory map instead.
+ */
+ memblock_dump_all();
+ memblock_remove(0, PHYS_ADDR_MAX);
+
+ for_each_efi_memory_desc(md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (efi_enabled(EFI_DBG)) {
+ char buf[64];
+
+ pr_info(" 0x%012llx-0x%012llx %s\n",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), md));
+ }
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_memory(md)) {
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (!is_usable_memory(md))
+ memblock_mark_nomap(paddr, size);
+
+ /* keep ACPI reclaim memory intact for kexec etc. */
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+ memblock_reserve(paddr, size);
+ }
+ }
+}
+
+void __init efi_init(void)
+{
+ struct efi_memory_map_data data;
+ u64 efi_system_table;
+
+ /* Grab UEFI information placed in FDT by stub */
+ efi_system_table = efi_get_fdt_params(&data);
+ if (!efi_system_table)
+ return;
+
+ if (efi_memmap_init_early(&data) < 0) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
+ * description of memory we have, so there is little point in
+ * proceeding if we cannot access it.
+ */
+ panic("Unable to map EFI memory map.\n");
+ }
+
+ WARN(efi.memmap.desc_version != 1,
+ "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+ efi.memmap.desc_version);
+
+ if (uefi_init(efi_system_table) < 0) {
+ efi_memmap_unmap();
+ return;
+ }
+
+ reserve_regions();
+ efi_esrt_init();
+ efi_mokvar_table_init();
+
+ memblock_reserve(data.phys_map & PAGE_MASK,
+ PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
+
+ init_screen_info();
+
+#ifdef CONFIG_ARM
+ /* ARM does not permit early mappings to persist across paging_init() */
+ efi_memmap_unmap();
+
+ if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+ struct efi_arm_entry_state *state;
+ bool dump_state = true;
+
+ state = early_memremap_ro(cpu_state_table,
+ sizeof(struct efi_arm_entry_state));
+ if (state == NULL) {
+ pr_warn("Unable to map CPU entry state table.\n");
+ return;
+ }
+
+ if ((state->sctlr_before_ebs & 1) == 0)
+ pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+ else if ((state->sctlr_after_ebs & 1) == 0)
+ pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+ else
+ dump_state = false;
+
+ if (dump_state || efi_enabled(EFI_DBG)) {
+ pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs);
+ pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs);
+ pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
+ pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
+ }
+ early_memunmap(state, sizeof(struct efi_arm_entry_state));
+ }
+#endif
+}
+
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
+{
+ u64 fb_base = screen_info.lfb_base;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
+
+ return fb_base >= range->cpu_addr &&
+ fb_base < (range->cpu_addr + range->size);
+}
+
+static struct device_node *find_pci_overlap_node(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int err;
+
+ err = of_pci_range_parser_init(&parser, np);
+ if (err) {
+ pr_warn("of_pci_range_parser_init() failed: %d\n", err);
+ continue;
+ }
+
+ for_each_of_pci_range(&parser, &range)
+ if (efifb_overlaps_pci_range(&range))
+ return np;
+ }
+ return NULL;
+}
+
+/*
+ * If the efifb framebuffer is backed by a PCI graphics controller, we have
+ * to ensure that this relation is expressed using a device link when
+ * running in DT mode, or the probe order may be reversed, resulting in a
+ * resource reservation conflict on the memory window that the efifb
+ * framebuffer steals from the PCIe host bridge.
+ */
+static int efifb_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ struct device_node *sup_np;
+ struct device *sup_dev;
+
+ sup_np = find_pci_overlap_node();
+
+ /*
+ * If there's no PCI graphics controller backing the efifb, we are
+ * done here.
+ */
+ if (!sup_np)
+ return 0;
+
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ of_node_put(sup_np);
+
+ /*
+ * Return -ENODEV if the PCI graphics controller device hasn't been
+ * registered yet. This ensures that efifb isn't allowed to probe
+ * and this function is retried again when new devices are
+ * registered.
+ */
+ if (!sup_dev)
+ return -ENODEV;
+
+ /*
+ * If this fails, retrying this function at a later point won't
+ * change anything. So, don't return an error after this.
+ */
+ if (!device_link_add(dev, sup_dev, fw_devlink_get_flags()))
+ dev_warn(dev, "device_link_add() failed\n");
+
+ put_device(sup_dev);
+
+ return 0;
+}
+
+static const struct fwnode_operations efifb_fwnode_ops = {
+ .add_links = efifb_add_links,
+};
+
+static struct fwnode_handle efifb_fwnode = {
+ .ops = &efifb_fwnode_ops,
+};
+
+static int __init register_gop_device(void)
+{
+ struct platform_device *pd;
+ int err;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return 0;
+
+ pd = platform_device_alloc("efi-framebuffer", 0);
+ if (!pd)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PCI))
+ pd->dev.fwnode = &efifb_fwnode;
+
+ err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
+ if (err)
+ return err;
+
+ return platform_device_add(pd);
+}
+subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
new file mode 100644
index 000000000..7e771c56c
--- /dev/null
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+
+#define DUMP_NAME_LEN 66
+
+#define EFIVARS_DATA_SIZE_MAX 1024
+
+static bool efivars_pstore_disable =
+ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
+
+static LIST_HEAD(efi_pstore_list);
+static DECLARE_WORK(efivar_work, NULL);
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ psi->data = NULL;
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ psi->data = NULL;
+ return 0;
+}
+
+static inline u64 generic_id(u64 timestamp, unsigned int part, int count)
+{
+ return (timestamp * 100 + part) * 1000 + count;
+}
+
+static int efi_pstore_read_func(struct efivar_entry *entry,
+ struct pstore_record *record)
+{
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ char name[DUMP_NAME_LEN], data_type;
+ int i;
+ int cnt;
+ unsigned int part;
+ unsigned long size;
+ u64 time;
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ return 0;
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ name[i] = entry->var.VariableName[i];
+
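+ /*
+ * The variable name comes in three formats: the newest carries a
+ * trailing 'C'/'D' compression flag, the next omits the flag, and the
+ * oldest also lacks the count field.
+ */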
+ if (sscanf(name, "dump-type%u-%u-%d-%llu-%c",
+ &record->type, &part, &cnt, &time, &data_type) == 5) {
+ record->id = generic_id(time, part, cnt);
+ record->part = part;
+ record->count = cnt;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ if (data_type == 'C')
+ record->compressed = true;
+ else
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else if (sscanf(name, "dump-type%u-%u-%d-%llu",
+ &record->type, &part, &cnt, &time) == 4) {
+ record->id = generic_id(time, part, cnt);
+ record->part = part;
+ record->count = cnt;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else if (sscanf(name, "dump-type%u-%u-%llu",
+ &record->type, &part, &time) == 3) {
+ /*
+ * Check if an old format, which doesn't support
+ * holding multiple logs, remains.
+ */
+ record->id = generic_id(time, part, 0);
+ record->part = part;
+ record->count = 0;
+ record->time.tv_sec = time;
+ record->time.tv_nsec = 0;
+ record->compressed = false;
+ record->ecc_notice_size = 0;
+ } else
+ return 0;
+
+ entry->var.DataSize = 1024;
+ __efivar_entry_get(entry, &entry->var.Attributes,
+ &entry->var.DataSize, entry->var.Data);
+ size = entry->var.DataSize;
+ memcpy(record->buf, entry->var.Data,
+ (size_t)min_t(unsigned long, EFIVARS_DATA_SIZE_MAX, size));
+
+ return size;
+}
+
+/**
+ * efi_pstore_scan_sysfs_enter
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ */
+static void efi_pstore_scan_sysfs_enter(struct efivar_entry *pos,
+ struct efivar_entry *next,
+ struct list_head *head)
+{
+ pos->scanning = true;
+ if (&next->list != head)
+ next->scanning = true;
+}
+
+/**
+ * __efi_pstore_scan_sysfs_exit
+ * @entry: deleting entry
+ * @turn_off_scanning: Check if a scanning flag should be turned off
+ */
+static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
+ bool turn_off_scanning)
+{
+ if (entry->deleting) {
+ list_del(&entry->list);
+ efivar_entry_iter_end();
+ kfree(entry);
+ if (efivar_entry_iter_begin())
+ return -EINTR;
+ } else if (turn_off_scanning)
+ entry->scanning = false;
+
+ return 0;
+}
+
+/**
+ * efi_pstore_scan_sysfs_exit
+ * @pos: scanning entry
+ * @next: next entry
+ * @head: list head
+ * @stop: a flag checking if scanning will stop
+ */
+static int efi_pstore_scan_sysfs_exit(struct efivar_entry *pos,
+ struct efivar_entry *next,
+ struct list_head *head, bool stop)
+{
+ int ret = __efi_pstore_scan_sysfs_exit(pos, true);
+
+ if (ret)
+ return ret;
+
+ if (stop)
+ ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head);
+ return ret;
+}
+
+/**
+ * efi_pstore_sysfs_entry_iter
+ *
+ * @record: pstore record to pass to callback
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ */
+static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
+{
+ struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
+ struct efivar_entry *entry, *n;
+ struct list_head *head = &efi_pstore_list;
+ int size = 0;
+ int ret;
+
+ if (!*pos) {
+ list_for_each_entry_safe(entry, n, head, list) {
+ efi_pstore_scan_sysfs_enter(entry, n, head);
+
+ size = efi_pstore_read_func(entry, record);
+ ret = efi_pstore_scan_sysfs_exit(entry, n, head,
+ size < 0);
+ if (ret)
+ return ret;
+ if (size)
+ break;
+ }
+ *pos = n;
+ return size;
+ }
+
+ list_for_each_entry_safe_from((*pos), n, head, list) {
+ efi_pstore_scan_sysfs_enter((*pos), n, head);
+
+ size = efi_pstore_read_func((*pos), record);
+ ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0);
+ if (ret)
+ return ret;
+ if (size)
+ break;
+ }
+ *pos = n;
+ return size;
+}
+
+/**
+ * efi_pstore_read
+ *
+ * This function returns the size of an NVRAM entry logged via
+ * efi_pstore_write(). The meaning and behavior of efi_pstore/pstore
+ * are as follows.
+ *
+ * size > 0: Got the data of an entry logged via efi_pstore_write(), and
+ * the pstore filesystem will continue reading subsequent entries.
+ * size == 0: The entry was not logged via efi_pstore_write(), and the
+ * efi_pstore driver will continue reading subsequent entries.
+ * size < 0: Failed to get the data of an entry logged via
+ * efi_pstore_write(), and pstore will stop reading entries.
+ */
+static ssize_t efi_pstore_read(struct pstore_record *record)
+{
+ ssize_t size;
+
+ record->buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
+ if (!record->buf)
+ return -ENOMEM;
+
+ if (efivar_entry_iter_begin()) {
+ size = -EINTR;
+ goto out;
+ }
+ size = efi_pstore_sysfs_entry_iter(record);
+ efivar_entry_iter_end();
+
+out:
+ if (size <= 0) {
+ kfree(record->buf);
+ record->buf = NULL;
+ }
+ return size;
+}
+
+static int efi_pstore_write(struct pstore_record *record)
+{
+ char name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ int i, ret = 0;
+
+ record->id = generic_id(record->time.tv_sec, record->part,
+ record->count);
+
+ /* Since we copy the entire length of name, make sure it is wiped. */
+ memset(name, 0, sizeof(name));
+
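+ /*
+ * Encode type, part, count, timestamp and the compression flag into
+ * the variable name; efi_pstore_read_func() parses this format back.
+ */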
+ snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld-%c",
+ record->type, record->part, record->count,
+ (long long)record->time.tv_sec,
+ record->compressed ? 'C' : 'D');
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
+ false, record->size, record->psi->buf);
+
+ if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
+ if (!schedule_work(&efivar_work))
+ module_put(THIS_MODULE);
+
+ return ret;
+};
+
+/*
+ * Clean up an entry with the same name
+ */
+static int efi_pstore_erase_func(struct efivar_entry *entry, void *data)
+{
+ efi_char16_t *efi_name = data;
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ unsigned long ucs2_len = ucs2_strlen(efi_name);
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ return 0;
+
+ if (ucs2_strncmp(entry->var.VariableName, efi_name, (size_t)ucs2_len))
+ return 0;
+
+ if (entry->scanning) {
+ /*
+ * Skip deletion because this entry will be deleted
+ * after scanning is completed.
+ */
+ entry->deleting = true;
+ } else
+ list_del(&entry->list);
+
+ /* found */
+ __efivar_entry_delete(entry);
+
+ return 1;
+}
+
+static int efi_pstore_erase_name(const char *name)
+{
+ struct efivar_entry *entry = NULL;
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ int found, i;
+
+ for (i = 0; i < DUMP_NAME_LEN; i++) {
+ efi_name[i] = name[i];
+ if (name[i] == '\0')
+ break;
+ }
+
+ if (efivar_entry_iter_begin())
+ return -EINTR;
+
+ found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list,
+ efi_name, &entry);
+ efivar_entry_iter_end();
+
+ if (found && !entry->scanning)
+ kfree(entry);
+
+ return found ? 0 : -ENOENT;
+}
+
+static int efi_pstore_erase(struct pstore_record *record)
+{
+ char name[DUMP_NAME_LEN];
+ int ret;
+
+ snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld",
+ record->type, record->part, record->count,
+ (long long)record->time.tv_sec);
+ ret = efi_pstore_erase_name(name);
+ if (ret != -ENOENT)
+ return ret;
+
+ snprintf(name, sizeof(name), "dump-type%u-%u-%lld",
+ record->type, record->part, (long long)record->time.tv_sec);
+ ret = efi_pstore_erase_name(name);
+
+ return ret;
+}
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "efi",
+ .flags = PSTORE_FLAGS_DMESG,
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
+
+static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ entry->var.VendorGuid = vendor;
+
+ ret = efivar_entry_add(entry, &efi_pstore_list);
+ if (ret)
+ kfree(entry);
+
+ return ret;
+}
+
+static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry = data;
+
+ if (efivar_entry_find(name, vendor, &efi_pstore_list, false))
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+ return 1;
+}
+
+static void efi_pstore_update_entries(struct work_struct *work)
+{
+ struct efivar_entry *entry;
+ int err;
+
+ /* Add new sysfs entries */
+ while (1) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ err = efivar_init(efi_pstore_update_entry, entry,
+ false, &efi_pstore_list);
+ if (!err)
+ break;
+
+ efivar_entry_add(entry, &efi_pstore_list);
+ }
+
+ kfree(entry);
+ module_put(THIS_MODULE);
+}
+
+static __init int efivars_pstore_init(void)
+{
+ int ret;
+
+ if (!efivars_kobject() || !efivar_supports_writes())
+ return 0;
+
+ if (efivars_pstore_disable)
+ return 0;
+
+ ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list);
+ if (ret)
+ return ret;
+
+ efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (!efi_pstore_info.buf)
+ return -ENOMEM;
+
+ efi_pstore_info.bufsize = 1024;
+
+ if (pstore_register(&efi_pstore_info)) {
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
+ }
+
+ INIT_WORK(&efivar_work, efi_pstore_update_entries);
+
+ return 0;
+}
+
+static __exit void efivars_pstore_exit(void)
+{
+ if (!efi_pstore_info.bufsize)
+ return;
+
+ pstore_unregister(&efi_pstore_info);
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
+}
+
+module_init(efivars_pstore_init);
+module_exit(efivars_pstore_exit);
+
+MODULE_DESCRIPTION("EFI variable backend for pstore");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:efivars");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
new file mode 100644
index 000000000..332739f3e
--- /dev/null
+++ b/drivers/firmware/efi/efi.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * efi.c - EFI subsystem
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
+ *
+ * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
+ * allowing the efivarfs to be mounted or the efivars module to be loaded.
+ * The existence of /sys/firmware/efi may also be used by userspace to
+ * determine that the system supports EFI.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/kexec.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ucs2_string.h>
+#include <linux/memblock.h>
+#include <linux/security.h>
+
+#include <asm/early_ioremap.h>
+
+struct efi __read_mostly efi = {
+ .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+ .acpi20 = EFI_INVALID_TABLE_ADDR,
+ .smbios = EFI_INVALID_TABLE_ADDR,
+ .smbios3 = EFI_INVALID_TABLE_ADDR,
+ .esrt = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR,
+ .tpm_final_log = EFI_INVALID_TABLE_ADDR,
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ .mokvar_table = EFI_INVALID_TABLE_ADDR,
+#endif
+};
+EXPORT_SYMBOL(efi);
+
+unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
+
+struct mm_struct efi_mm = {
+ .mm_rb = RB_ROOT,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
+ MMAP_LOCK_INITIALIZER(efi_mm)
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+};
+
+struct workqueue_struct *efi_rts_wq;
+
+static bool disable_runtime;
+static int __init setup_noefi(char *arg)
+{
+ disable_runtime = true;
+ return 0;
+}
+early_param("noefi", setup_noefi);
+
+bool efi_runtime_disabled(void)
+{
+ return disable_runtime;
+}
+
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
+}
+
+static int __init parse_efi_cmdline(char *str)
+{
+ if (!str) {
+ pr_warn("need at least one option\n");
+ return -EINVAL;
+ }
+
+ if (parse_option_str(str, "debug"))
+ set_bit(EFI_DBG, &efi.flags);
+
+ if (parse_option_str(str, "noruntime"))
+ disable_runtime = true;
+
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
+ return 0;
+}
+early_param("efi", parse_efi_cmdline);
+
+struct kobject *efi_kobj;
+
+/*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver.
+ * Note: do not add more fields to the systab sysfs file, as that breaks
+ * the sysfs one-value-per-file rule!
+ */
+static ssize_t systab_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *str = buf;
+
+ if (!kobj || !buf)
+ return -EINVAL;
+
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
+ if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
+ /*
+ * If both SMBIOS and SMBIOS3 entry points are implemented, the
+ * SMBIOS3 entry point shall be preferred, so we list it first to
+ * let applications stop parsing after the first match.
+ */
+ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
+ if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+
+ if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
+ str = efi_systab_show_arch(str);
+
+ return str - buf;
+}
+
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
+
+static ssize_t fw_platform_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
+}
+
+extern __weak struct kobj_attribute efi_attr_fw_vendor;
+extern __weak struct kobj_attribute efi_attr_runtime;
+extern __weak struct kobj_attribute efi_attr_config_table;
+static struct kobj_attribute efi_attr_fw_platform_size =
+ __ATTR_RO(fw_platform_size);
+
+static struct attribute *efi_subsys_attrs[] = {
+ &efi_attr_systab.attr,
+ &efi_attr_fw_platform_size.attr,
+ &efi_attr_fw_vendor.attr,
+ &efi_attr_runtime.attr,
+ &efi_attr_config_table.attr,
+ NULL,
+};
+
+umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ return attr->mode;
+}
+
+static const struct attribute_group efi_subsys_attr_group = {
+ .attrs = efi_subsys_attrs,
+ .is_visible = efi_attr_is_visible,
+};
+
+static struct efivars generic_efivars;
+static struct efivar_operations generic_ops;
+
+static int generic_ops_register(void)
+{
+ generic_ops.get_variable = efi.get_variable;
+ generic_ops.get_next_variable = efi.get_next_variable;
+ generic_ops.query_variable_store = efi_query_variable_store;
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
+ generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ }
+ return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+}
+
+static void generic_ops_unregister(void)
+{
+ efivars_unregister(&generic_efivars);
+}
+
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
+#define EFIVAR_SSDT_NAME_MAX 16
+static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+static int __init efivar_ssdt_setup(char *str)
+{
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;
+
+ if (strlen(str) < sizeof(efivar_ssdt))
+ memcpy(efivar_ssdt, str, strlen(str));
+ else
+ pr_warn("efivar_ssdt: name too long: %s\n", str);
+ return 1;
+}
+__setup("efivar_ssdt=", efivar_ssdt_setup);
+
+static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+ struct list_head *list = data;
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
+
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ return 0;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
+
+ efivar_entry_add(entry, list);
+
+ return 0;
+}
+
+static __init int efivar_ssdt_load(void)
+{
+ LIST_HEAD(entries);
+ struct efivar_entry *entry, *aux;
+ unsigned long size;
+ void *data;
+ int ret;
+
+ if (!efivar_ssdt[0])
+ return 0;
+
+ ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
+
+ list_for_each_entry_safe(entry, aux, &entries, list) {
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
+ &entry->var.VendorGuid);
+
+ list_del(&entry->list);
+
+ ret = efivar_entry_size(entry, &size);
+ if (ret) {
+ pr_err("failed to get var size\n");
+ goto free_entry;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto free_entry;
+ }
+
+ ret = efivar_entry_get(entry, NULL, &size, data);
+ if (ret) {
+ pr_err("failed to get var data\n");
+ goto free_data;
+ }
+
+ ret = acpi_load_table(data, NULL);
+ if (ret) {
+ pr_err("failed to load table: %d\n", ret);
+ goto free_data;
+ }
+
+ goto free_entry;
+
+free_data:
+ kfree(data);
+
+free_entry:
+ kfree(entry);
+ }
+
+ return ret;
+}
+#else
+static inline int efivar_ssdt_load(void) { return 0; }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+#define EFI_DEBUGFS_MAX_BLOBS 32
+
+static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
+
+static void __init efi_debugfs_init(void)
+{
+ struct dentry *efi_debugfs;
+ efi_memory_desc_t *md;
+ char name[32];
+ int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
+ int i = 0;
+
+ efi_debugfs = debugfs_create_dir("efi", NULL);
+ if (IS_ERR_OR_NULL(efi_debugfs))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ switch (md->type) {
+ case EFI_BOOT_SERVICES_CODE:
+ snprintf(name, sizeof(name), "boot_services_code%d",
+ type_count[md->type]++);
+ break;
+ case EFI_BOOT_SERVICES_DATA:
+ snprintf(name, sizeof(name), "boot_services_data%d",
+ type_count[md->type]++);
+ break;
+ default:
+ continue;
+ }
+
+ if (i >= EFI_DEBUGFS_MAX_BLOBS) {
+ pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
+ EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
+ break;
+ }
+
+ debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
+ debugfs_blob[i].data = memremap(md->phys_addr,
+ debugfs_blob[i].size,
+ MEMREMAP_WB);
+ if (!debugfs_blob[i].data)
+ continue;
+
+ debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
+ i++;
+ }
+}
+#else
+static inline void efi_debugfs_init(void) {}
+#endif
+
+/*
+ * We register the efi subsystem with the firmware subsystem and the
+ * efivars subsystem with the efi subsystem, if the system was booted with
+ * EFI.
+ */
+static int __init efisubsys_init(void)
+{
+ int error;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.runtime_supported_mask = 0;
+
+ if (!efi_enabled(EFI_BOOT))
+ return 0;
+
+ if (efi.runtime_supported_mask) {
+ /*
+ * Since we process only one efi_runtime_service() at a time, an
+ * ordered workqueue (which creates only one execution context)
+ * should suffice for all our needs.
+ */
+ efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+ if (!efi_rts_wq) {
+ pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ efi.runtime_supported_mask = 0;
+ return 0;
+ }
+ }
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
+ platform_device_register_simple("rtc-efi", 0, NULL, 0);
+
+ /* We register the efi directory at /sys/firmware/efi */
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
+ error = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
+ error = generic_ops_register();
+ if (error)
+ goto err_put;
+ efivar_ssdt_load();
+ platform_device_register_simple("efivars", 0, NULL, 0);
+ }
+
+ error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
+ if (error) {
+ pr_err("efi: Sysfs attribute export failed with error %d.\n",
+ error);
+ goto err_unregister;
+ }
+
+ error = efi_runtime_map_init(efi_kobj);
+ if (error)
+ goto err_remove_group;
+
+ /* and the standard mountpoint for efivarfs */
+ error = sysfs_create_mount_point(efi_kobj, "efivars");
+ if (error) {
+ pr_err("efivars: Subsystem registration failed.\n");
+ goto err_remove_group;
+ }
+
+ if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
+ efi_debugfs_init();
+
+ return 0;
+
+err_remove_group:
+ sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
+err_unregister:
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
+ generic_ops_unregister();
+err_put:
+ kobject_put(efi_kobj);
+err_destroy_wq:
+ if (efi_rts_wq)
+ destroy_workqueue(efi_rts_wq);
+
+ return error;
+}
+
+subsys_initcall(efisubsys_init);
+
+/*
+ * Find the efi memory descriptor for a given physical address. Given a
+ * physical address, determine if it exists within an EFI Memory Map entry,
+ * and if so, populate the supplied memory descriptor with the appropriate
+ * data.
+ */
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP)) {
+ pr_err_once("EFI_MEMMAP is not enabled.\n");
+ return -EINVAL;
+ }
+
+ if (!out_md) {
+ pr_err_once("out_md is null.\n");
+ return -EINVAL;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 size;
+ u64 end;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+ if (phys_addr >= md->phys_addr && phys_addr < end) {
+ memcpy(out_md, md, sizeof(*out_md));
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/*
+ * Calculate the highest address of an efi memory descriptor.
+ */
+u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
+{
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 end = md->phys_addr + size;
+ return end;
+}
+
+void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
+
+/**
+ * efi_mem_reserve - Reserve an EFI memory region
+ * @addr: Physical address to reserve
+ * @size: Size of reservation
+ *
+ * Mark a region as reserved from general kernel allocation and
+ * prevent it being released by efi_free_boot_services().
+ *
+ * This function should be called by drivers once they've parsed EFI
+ * configuration tables to figure out where their data lives, e.g.
+ * efi_esrt_init().
+ */
+void __init efi_mem_reserve(phys_addr_t addr, u64 size)
+{
+ if (!memblock_is_region_reserved(addr, size))
+ memblock_reserve(addr, size);
+
+ /*
+ * Some architectures (x86) reserve all boot services ranges
+ * until efi_free_boot_services() because of buggy firmware
+ * implementations. This means the above memblock_reserve() is
+ * superfluous on x86; instead what it needs to do is
+ * ensure the @addr, @size range is not freed.
+ */
+ efi_arch_mem_reserve(addr, size);
+}
+
+static const efi_config_table_type_t common_tables[] __initconst = {
+ {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
+ {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
+ {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
+ {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
+ {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
+ {LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_EFI_RCI2_TABLE
+ {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
+#endif
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
+#endif
+ {},
+};
+
+static __init int match_config_table(const efi_guid_t *guid,
+ unsigned long table,
+ const efi_config_table_type_t *table_types)
+{
+ int i;
+
+ for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
+ if (!efi_guidcmp(*guid, table_types[i].guid)) {
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ",
+ table_types[i].name, table);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables)
+{
+ const efi_config_table_64_t *tbl64 = (void *)config_tables;
+ const efi_config_table_32_t *tbl32 = (void *)config_tables;
+ const efi_guid_t *guid;
+ unsigned long table;
+ int i;
+
+ pr_info("");
+ for (i = 0; i < count; i++) {
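+ /*
+ * On x86 the config table layout follows the firmware's word size
+ * (EFI_64BIT), which may differ from the kernel's; other
+ * architectures use the native layout.
+ */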
+ if (!IS_ENABLED(CONFIG_X86)) {
+ guid = &config_tables[i].guid;
+ table = (unsigned long)config_tables[i].table;
+ } else if (efi_enabled(EFI_64BIT)) {
+ guid = &tbl64[i].guid;
+ table = tbl64[i].table;
+
+ if (IS_ENABLED(CONFIG_X86_32) &&
+ tbl64[i].table > U32_MAX) {
+ pr_cont("\n");
+ pr_err("Table located above 4GB, disabling EFI.\n");
+ return -EINVAL;
+ }
+ } else {
+ guid = &tbl32[i].guid;
+ table = tbl32[i].table;
+ }
+
+ if (!match_config_table(guid, table, common_tables) && arch_tables)
+ match_config_table(guid, table, arch_tables);
+ }
+ pr_cont("\n");
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
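+ /*
+ * Map only the seed header first to learn its size, then remap the
+ * full entry below so the seed bits can be fed to the RNG and wiped.
+ */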
+ seed = early_memremap(efi_rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+ size = min_t(u32, seed->size, SZ_1K); // sanity check
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = early_memremap(efi_rng_seed,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ add_bootloader_randomness(seed->bits, size);
+ memzero_explicit(seed->bits, size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ }
+
+ if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
+ efi_memattr_init();
+
+ efi_tpm_eventlog_init();
+
+ if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = mem_reserve;
+
+ while (prsv) {
+ struct linux_efi_memreserve *rsv;
+ u8 *p;
+
+ /*
+ * Just map a full page: that is what we will get
+ * anyway, and it permits us to map the entire entry
+ * before knowing its size.
+ */
+ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+ PAGE_SIZE);
+ if (p == NULL) {
+ pr_err("Could not map UEFI memreserve entry!\n");
+ return -ENOMEM;
+ }
+
+ rsv = (void *)(p + prsv % PAGE_SIZE);
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv,
+ struct_size(rsv, entry, rsv->size));
+
+ for (i = 0; i < atomic_read(&rsv->count); i++) {
+ memblock_reserve(rsv->entry[i].base,
+ rsv->entry[i].size);
+ }
+
+ prsv = rsv->next;
+ early_memunmap(p, PAGE_SIZE);
+ }
+ }
+
+ if (rt_prop != EFI_INVALID_TABLE_ADDR) {
+ efi_rt_properties_table_t *tbl;
+
+ tbl = early_memremap(rt_prop, sizeof(*tbl));
+ if (tbl) {
+ efi.runtime_supported_mask &= tbl->runtime_services_supported;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
+ return 0;
+}
+
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
+ int min_major_version)
+{
+ if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect!\n");
+ return -EINVAL;
+ }
+
+ if ((systab_hdr->revision >> 16) < min_major_version)
+ pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
+ systab_hdr->revision >> 16,
+ systab_hdr->revision & 0xffff,
+ min_major_version);
+
+ return 0;
+}
+
+#ifndef CONFIG_IA64
+static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
+ size_t size)
+{
+ const efi_char16_t *ret;
+
+ ret = early_memremap_ro(fw_vendor, size);
+ if (!ret)
+ pr_err("Could not map the firmware vendor!\n");
+ return ret;
+}
+
+static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
+{
+ early_memunmap((void *)fw_vendor, size);
+}
+#else
+#define map_fw_vendor(p, s) __va(p)
+#define unmap_fw_vendor(v, s)
+#endif
+
+void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor)
+{
+ char vendor[100] = "unknown";
+ const efi_char16_t *c16;
+ size_t i;
+
+ c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
+ if (c16) {
+ for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+
+ unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ }
+
+ pr_info("EFI v%u.%.02u by %s\n",
+ systab_hdr->revision >> 16,
+ systab_hdr->revision & 0xffff,
+ vendor);
+
+ if (IS_ENABLED(CONFIG_X86_64) &&
+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
+ !strcmp(vendor, "Apple")) {
+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
+ }
+}
+
+static __initdata char memory_type_name[][13] = {
+ "Reserved",
+ "Loader Code",
+ "Loader Data",
+ "Boot Code",
+ "Boot Data",
+ "Runtime Code",
+ "Runtime Data",
+ "Conventional",
+ "Unusable",
+ "ACPI Reclaim",
+ "ACPI Mem NVS",
+ "MMIO",
+ "MMIO Port",
+ "PAL Code",
+ "Persistent",
+};
+
+char * __init efi_md_typeattr_format(char *buf, size_t size,
+ const efi_memory_desc_t *md)
+{
+ char *pos;
+ int type_len;
+ u64 attr;
+
+ pos = buf;
+ if (md->type >= ARRAY_SIZE(memory_type_name))
+ type_len = snprintf(pos, size, "[type=%u", md->type);
+ else
+ type_len = snprintf(pos, size, "[%-*s",
+ (int)(sizeof(memory_type_name[0]) - 1),
+ memory_type_name[md->type]);
+ if (type_len >= size)
+ return buf;
+
+ pos += type_len;
+ size -= type_len;
+
+ attr = md->attribute;
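+ /*
+ * If any attribute bits outside the known set are present, print the
+ * raw attribute value instead of decoding individual flags.
+ */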
+ if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
+ EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
+ EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
+ EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ snprintf(pos, size, "|attr=0x%016llx]",
+ (unsigned long long)attr);
+ else
+ snprintf(pos, size,
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
+ attr & EFI_MEMORY_NV ? "NV" : "",
+ attr & EFI_MEMORY_XP ? "XP" : "",
+ attr & EFI_MEMORY_RP ? "RP" : "",
+ attr & EFI_MEMORY_WP ? "WP" : "",
+ attr & EFI_MEMORY_RO ? "RO" : "",
+ attr & EFI_MEMORY_UCE ? "UCE" : "",
+ attr & EFI_MEMORY_WB ? "WB" : "",
+ attr & EFI_MEMORY_WT ? "WT" : "",
+ attr & EFI_MEMORY_WC ? "WC" : "",
+ attr & EFI_MEMORY_UC ? "UC" : "");
+ return buf;
+}
+
+/*
+ * IA64 has a funky EFI memory map that doesn't work the same way as
+ * other architectures.
+ */
+#ifndef CONFIG_IA64
+/*
+ * efi_mem_attributes - lookup memmap attributes for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering
+ * @phys_addr. Returns the EFI memory attributes if the region
+ * was found in the memory map, 0 otherwise.
+ */
+u64 efi_mem_attributes(unsigned long phys_addr)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->attribute;
+ }
+ return 0;
+}
+
+/*
+ * efi_mem_type - lookup memmap type for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering @phys_addr.
+ * Returns the EFI memory type if the region was found in the memory
+ * map, -EINVAL otherwise.
+ */
+int efi_mem_type(unsigned long phys_addr)
+{
+ const efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return -ENOTSUPP;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->type;
+ }
+ return -EINVAL;
+}
+#endif
+
+int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_SUCCESS:
+ err = 0;
+ break;
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ case EFI_ABORTED:
+ err = -EINTR;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
+
+static int __init efi_memreserve_map_root(void)
+{
+ if (mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (WARN_ON_ONCE(!efi_memreserve_root))
+ return -ENOMEM;
+ return 0;
+}
+
+static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
+{
+ struct resource *res, *parent;
+ int ret;
+
+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = addr;
+ res->end = addr + size - 1;
+
+ /* we expect a conflict with a 'System RAM' region */
+ parent = request_resource_conflict(&iomem_resource, res);
+ ret = parent ? request_resource(parent, res) : 0;
+
+ /*
+ * Given that efi_mem_reserve_iomem() can be called at any
+ * time, only call memblock_reserve() if the architecture
+ * keeps the infrastructure around.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+ memblock_reserve(addr, size);
+
+ return ret;
+}
+
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+ struct linux_efi_memreserve *rsv;
+ unsigned long prsv;
+ int rc, index;
+
+ if (efi_memreserve_root == (void *)ULONG_MAX)
+ return -ENODEV;
+
+ if (!efi_memreserve_root) {
+ rc = efi_memreserve_map_root();
+ if (rc)
+ return rc;
+ }
+
+ /* first try to find a slot in an existing linked list entry */
+ for (prsv = efi_memreserve_root->next; prsv; ) {
+ rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
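+ /*
+ * Atomically claim the next free slot; if the entry is already full,
+ * the old count (== rsv->size) is returned and we move on to the
+ * next entry in the list.
+ */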
+ index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ if (index < rsv->size) {
+ rsv->entry[index].base = addr;
+ rsv->entry[index].size = size;
+
+ memunmap(rsv);
+ return efi_mem_reserve_iomem(addr, size);
+ }
+ prsv = rsv->next;
+ memunmap(rsv);
+ }
+
+ /* no slot found - allocate a new linked list entry */
+ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+ if (!rsv)
+ return -ENOMEM;
+
+ rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
+ if (rc) {
+ free_page((unsigned long)rsv);
+ return rc;
+ }
+
+ /*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+ rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
+ atomic_set(&rsv->count, 1);
+ rsv->entry[0].base = addr;
+ rsv->entry[0].size = size;
+
+ spin_lock(&efi_mem_reserve_persistent_lock);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+ return efi_mem_reserve_iomem(addr, size);
+}
+
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi_memreserve_root)
+ return 0;
+ if (efi_memreserve_map_root())
+ efi_memreserve_root = (void *)ULONG_MAX;
+ return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
+#ifdef CONFIG_KEXEC
+static int update_efi_random_seed(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ if (!kexec_in_progress)
+ return NOTIFY_DONE;
+
+ seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
+ if (seed != NULL) {
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = memremap(efi_rng_seed, sizeof(*seed) + size,
+ MEMREMAP_WB);
+ if (seed != NULL) {
+ seed->size = size;
+ get_random_bytes(seed->bits, seed->size);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+ .notifier_call = update_efi_random_seed,
+};
+
+static int __init register_update_efi_random_seed(void)
+{
+ if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 000000000..15a47539d
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+ size_t i;
+
+ for (i = 0; i < strlen(str); i++)
+ str16[i] = str[i];
+
+ str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+ int ret;
+ efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+ struct efivar_entry *entry;
+ size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+ if (size > sizeof(entry->var.Data)) {
+ pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
+ return -EINVAL;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
+ return -ENOMEM;
+ }
+
+ efibc_str_to_str16(name, entry->var.VariableName);
+ efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+ memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+ ret = efivar_entry_set_safe(entry->var.VariableName,
+ entry->var.VendorGuid,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ false, size, entry->var.Data);
+
+ if (ret)
+ pr_err("failed to set %s EFI variable: 0x%x\n",
+ name, ret);
+
+ kfree(entry);
+ return ret;
+}
+
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ const char *reason = "shutdown";
+ int ret;
+
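+ /*
+ * On SYS_RESTART, @data (when non-NULL) carries the restart command
+ * string, which is forwarded to the bootloader via LoaderEntryOneShot.
+ */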
+ if (event == SYS_RESTART)
+ reason = "reboot";
+
+ ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+ if (ret || !data)
+ return NOTIFY_DONE;
+
+ efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+ .notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+ int ret;
+
+ if (!efivars_kobject() || !efivar_supports_writes())
+ return -ENODEV;
+
+ ret = register_reboot_notifier(&efibc_reboot_notifier);
+ if (ret)
+ pr_err("unable to register reboot notifier\n");
+
+ return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+ unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
new file mode 100644
index 000000000..e6b16b3a1
--- /dev/null
+++ b/drivers/firmware/efi/efivars.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Originally from efivars.c,
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ *
+ * This code takes all variables accessible from EFI runtime and
+ * exports them via sysfs
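+ *
+ * Each variable appears as a directory below /sys/firmware/efi/vars/
+ * containing the guid, size, attributes, data and raw_var files
+ * registered further down.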
+ */
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <linux/compat.h>
+
+#define EFIVARS_VERSION "0.08"
+#define EFIVARS_DATE "2004-May-17"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
+MODULE_DESCRIPTION("sysfs interface to EFI Variables");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(EFIVARS_VERSION);
+
+static LIST_HEAD(efivar_sysfs_list);
+
+static struct kset *efivars_kset;
+
+static struct bin_attribute *efivars_new_var;
+static struct bin_attribute *efivars_del_var;
+
+struct compat_efi_variable {
+ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
+ efi_guid_t VendorGuid;
+ __u32 DataSize;
+ __u8 Data[1024];
+ __u32 Status;
+ __u32 Attributes;
+} __packed;
+
+struct efivar_attribute {
+ struct attribute attr;
+ ssize_t (*show) (struct efivar_entry *entry, char *buf);
+ ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+};
+
+#define EFIVAR_ATTR(_name, _mode, _show, _store) \
+struct efivar_attribute efivar_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define to_efivar_attr(_attr) container_of(_attr, struct efivar_attribute, attr)
+#define to_efivar_entry(obj) container_of(obj, struct efivar_entry, kobj)
+
+/*
+ * Prototype for sysfs creation function
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var);
+
+static ssize_t
+efivar_guid_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ char *str = buf;
+
+ if (!entry || !buf)
+ return 0;
+
+ efi_guid_to_str(&var->VendorGuid, str);
+ str += strlen(str);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+static ssize_t
+efivar_attr_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
+ char *str = buf;
+ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
+ return -EIO;
+
+ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+ str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+ if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
+ str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
+ if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
+ str += sprintf(str,
+ "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
+ if (var->Attributes &
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
+ str += sprintf(str,
+ "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
+ if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
+ str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+ return str - buf;
+}
+
+static ssize_t
+efivar_size_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
+ char *str = buf;
+ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
+ return -EIO;
+
+ str += sprintf(str, "0x%lx\n", var->DataSize);
+ return str - buf;
+}
+
+static ssize_t
+efivar_data_read(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
+ int ret;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
+ return -EIO;
+
+ memcpy(buf, var->Data, var->DataSize);
+ return var->DataSize;
+}
+
+static inline int
+sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
+ unsigned long size, u32 attributes, u8 *data)
+{
+ /*
+ * If only updating the variable data, then the name
+ * and guid should remain the same
+ */
+ if (memcmp(name, var->VariableName, sizeof(var->VariableName)) ||
+ efi_guidcmp(vendor, var->VendorGuid)) {
+ printk(KERN_ERR "efivars: Cannot edit the wrong variable!\n");
+ return -EINVAL;
+ }
+
+ if ((size <= 0) || (attributes == 0)) {
+ printk(KERN_ERR "efivars: DataSize & Attributes must be valid!\n");
+ return -EINVAL;
+ }
+
+ if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ efivar_validate(vendor, name, data, size) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
+{
+ memcpy(dst->VariableName, src->VariableName, EFI_VAR_NAME_LEN);
+ memcpy(dst->Data, src->Data, sizeof(src->Data));
+
+ dst->VendorGuid = src->VendorGuid;
+ dst->DataSize = src->DataSize;
+ dst->Attributes = src->Attributes;
+}
+
+/*
+ * We allow each variable to be edited via rewriting the
+ * entire efi variable structure.
+ */
+static ssize_t
+efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+{
+ struct efi_variable *new_var, *var = &entry->var;
+ efi_char16_t *name;
+ unsigned long size;
+ efi_guid_t vendor;
+ u32 attributes;
+ u8 *data;
+ int err;
+
+ if (!entry || !buf)
+ return -EINVAL;
+
+ if (in_compat_syscall()) {
+ struct compat_efi_variable *compat;
+
+ if (count != sizeof(*compat))
+ return -EINVAL;
+
+ compat = (struct compat_efi_variable *)buf;
+ attributes = compat->Attributes;
+ vendor = compat->VendorGuid;
+ name = compat->VariableName;
+ size = compat->DataSize;
+ data = compat->Data;
+
+ err = sanity_check(var, name, vendor, size, attributes, data);
+ if (err)
+ return err;
+
+ copy_out_compat(&entry->var, compat);
+ } else {
+ if (count != sizeof(struct efi_variable))
+ return -EINVAL;
+
+ new_var = (struct efi_variable *)buf;
+
+ attributes = new_var->Attributes;
+ vendor = new_var->VendorGuid;
+ name = new_var->VariableName;
+ size = new_var->DataSize;
+ data = new_var->Data;
+
+ err = sanity_check(var, name, vendor, size, attributes, data);
+ if (err)
+ return err;
+
+ memcpy(&entry->var, new_var, count);
+ }
+
+ err = efivar_entry_set(entry, attributes, size, data, NULL);
+ if (err) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
+ return -EIO;
+ }
+
+ return count;
+}
+
+static ssize_t
+efivar_show_raw(struct efivar_entry *entry, char *buf)
+{
+ struct efi_variable *var = &entry->var;
+ struct compat_efi_variable *compat;
+ unsigned long datasize = sizeof(var->Data);
+ size_t size;
+ int ret;
+
+ if (!entry || !buf)
+ return 0;
+
+ ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+ var->DataSize = datasize;
+ if (ret)
+ return -EIO;
+
+ if (in_compat_syscall()) {
+ compat = (struct compat_efi_variable *)buf;
+
+ size = sizeof(*compat);
+ memcpy(compat->VariableName, var->VariableName,
+ EFI_VAR_NAME_LEN);
+ memcpy(compat->Data, var->Data, sizeof(compat->Data));
+
+ compat->VendorGuid = var->VendorGuid;
+ compat->DataSize = var->DataSize;
+ compat->Attributes = var->Attributes;
+ } else {
+ size = sizeof(*var);
+ memcpy(buf, var, size);
+ }
+
+ return size;
+}
+
+/*
+ * Generic read/write functions that call the specific functions of
+ * the attributes...
+ */
+static ssize_t efivar_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct efivar_entry *var = to_efivar_entry(kobj);
+ struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (efivar_attr->show) {
+ ret = efivar_attr->show(var, buf);
+ }
+ return ret;
+}
+
+static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efivar_entry *var = to_efivar_entry(kobj);
+ struct efivar_attribute *efivar_attr = to_efivar_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (efivar_attr->store)
+ ret = efivar_attr->store(var, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops efivar_attr_ops = {
+ .show = efivar_attr_show,
+ .store = efivar_attr_store,
+};
+
+static void efivar_release(struct kobject *kobj)
+{
+ struct efivar_entry *var = to_efivar_entry(kobj);
+ kfree(var);
+}
+
+static EFIVAR_ATTR(guid, 0400, efivar_guid_read, NULL);
+static EFIVAR_ATTR(attributes, 0400, efivar_attr_read, NULL);
+static EFIVAR_ATTR(size, 0400, efivar_size_read, NULL);
+static EFIVAR_ATTR(data, 0400, efivar_data_read, NULL);
+static EFIVAR_ATTR(raw_var, 0600, efivar_show_raw, efivar_store_raw);
+
+static struct attribute *def_attrs[] = {
+ &efivar_attr_guid.attr,
+ &efivar_attr_size.attr,
+ &efivar_attr_attributes.attr,
+ &efivar_attr_data.attr,
+ &efivar_attr_raw_var.attr,
+ NULL,
+};
+
+static struct kobj_type efivar_ktype = {
+ .release = efivar_release,
+ .sysfs_ops = &efivar_attr_ops,
+ .default_attrs = def_attrs,
+};
+
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
+ struct efi_variable *new_var = (struct efi_variable *)buf;
+ struct efivar_entry *new_entry;
+ bool need_compat = in_compat_syscall();
+ efi_char16_t *name;
+ unsigned long size;
+ u32 attributes;
+ u8 *data;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (need_compat) {
+ if (count != sizeof(*compat))
+ return -EINVAL;
+
+ attributes = compat->Attributes;
+ name = compat->VariableName;
+ size = compat->DataSize;
+ data = compat->Data;
+ } else {
+ if (count != sizeof(*new_var))
+ return -EINVAL;
+
+ attributes = new_var->Attributes;
+ name = new_var->VariableName;
+ size = new_var->DataSize;
+ data = new_var->Data;
+ }
+
+ if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ efivar_validate(new_var->VendorGuid, name, data,
+ size) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry)
+ return -ENOMEM;
+
+ if (need_compat)
+ copy_out_compat(&new_entry->var, compat);
+ else
+ memcpy(&new_entry->var, new_var, sizeof(*new_var));
+
+ err = efivar_entry_set(new_entry, attributes, size,
+ data, &efivar_sysfs_list);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (efivar_create_sysfs_entry(new_entry)) {
+ printk(KERN_WARNING "efivars: failed to create sysfs entry.\n");
+ kfree(new_entry);
+ }
+ return count;
+
+out:
+ kfree(new_entry);
+ return err;
+}
+
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct efi_variable *del_var = (struct efi_variable *)buf;
+ struct compat_efi_variable *compat;
+ struct efivar_entry *entry;
+ efi_char16_t *name;
+ efi_guid_t vendor;
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (in_compat_syscall()) {
+ if (count != sizeof(*compat))
+ return -EINVAL;
+
+ compat = (struct compat_efi_variable *)buf;
+ name = compat->VariableName;
+ vendor = compat->VendorGuid;
+ } else {
+ if (count != sizeof(*del_var))
+ return -EINVAL;
+
+ name = del_var->VariableName;
+ vendor = del_var->VendorGuid;
+ }
+
+ if (efivar_entry_iter_begin())
+ return -EINTR;
+ entry = efivar_entry_find(name, vendor, &efivar_sysfs_list, true);
+ if (!entry)
+ err = -EINVAL;
+ else if (__efivar_entry_delete(entry))
+ err = -EIO;
+
+ if (err) {
+ efivar_entry_iter_end();
+ return err;
+ }
+
+ if (!entry->scanning) {
+ efivar_entry_iter_end();
+ efivar_unregister(entry);
+ } else
+ efivar_entry_iter_end();
+
+ /* It's dead Jim.... */
+ return count;
+}
+
+/**
+ * efivar_create_sysfs_entry - create a new entry in sysfs
+ * @new_var: efivar entry to create
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+static int
+efivar_create_sysfs_entry(struct efivar_entry *new_var)
+{
+ int short_name_size;
+ char *short_name;
+ unsigned long utf8_name_size;
+ efi_char16_t *variable_name = new_var->var.VariableName;
+ int ret;
+
+ /*
+ * Length of the variable bytes in UTF8, plus the '-' separator,
+ * plus the GUID, plus trailing NUL
+ */
+ utf8_name_size = ucs2_utf8size(variable_name);
+ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+
+ short_name = kmalloc(short_name_size, GFP_KERNEL);
+ if (!short_name)
+ return -ENOMEM;
+
+ ucs2_as_utf8(short_name, variable_name, short_name_size);
+
+ /* This is ugly, but necessary to separate one vendor's
+ private variables from another's. */
+ short_name[utf8_name_size] = '-';
+ efi_guid_to_str(&new_var->var.VendorGuid,
+ short_name + utf8_name_size + 1);
+
+ new_var->kobj.kset = efivars_kset;
+
+ ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+ NULL, "%s", short_name);
+ kfree(short_name);
+ if (ret) {
+ kobject_put(&new_var->kobj);
+ return ret;
+ }
+
+ kobject_uevent(&new_var->kobj, KOBJ_ADD);
+ if (efivar_entry_add(new_var, &efivar_sysfs_list)) {
+ efivar_unregister(new_var);
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static int
+create_efivars_bin_attributes(void)
+{
+ struct bin_attribute *attr;
+ int error;
+
+ /* new_var */
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->attr.name = "new_var";
+ attr->attr.mode = 0200;
+ attr->write = efivar_create;
+ efivars_new_var = attr;
+
+ /* del_var */
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr) {
+ error = -ENOMEM;
+ goto out_free;
+ }
+ attr->attr.name = "del_var";
+ attr->attr.mode = 0200;
+ attr->write = efivar_delete;
+ efivars_del_var = attr;
+
+ sysfs_bin_attr_init(efivars_new_var);
+ sysfs_bin_attr_init(efivars_del_var);
+
+ /* Register */
+ error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_new_var);
+ if (error) {
+ printk(KERN_ERR "efivars: unable to create new_var sysfs file"
+ " due to error %d\n", error);
+ goto out_free;
+ }
+
+ error = sysfs_create_bin_file(&efivars_kset->kobj, efivars_del_var);
+ if (error) {
+ printk(KERN_ERR "efivars: unable to create del_var sysfs file"
+ " due to error %d\n", error);
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+ goto out_free;
+ }
+
+ return 0;
+out_free:
+ kfree(efivars_del_var);
+ efivars_del_var = NULL;
+ kfree(efivars_new_var);
+ efivars_new_var = NULL;
+ return error;
+}
+
+static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+ efivar_create_sysfs_entry(entry);
+
+ return 0;
+}
+
+static int efivar_sysfs_destroy(struct efivar_entry *entry, void *data)
+{
+ int err = efivar_entry_remove(entry);
+
+ if (err)
+ return err;
+ efivar_unregister(entry);
+ return 0;
+}
+
+static void efivars_sysfs_exit(void)
+{
+ /* Remove all entries and destroy */
+ int err;
+
+ err = __efivar_entry_iter(efivar_sysfs_destroy, &efivar_sysfs_list,
+ NULL, NULL);
+ if (err) {
+ pr_err("efivars: Failed to destroy sysfs entries\n");
+ return;
+ }
+
+ if (efivars_new_var)
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_new_var);
+ if (efivars_del_var)
+ sysfs_remove_bin_file(&efivars_kset->kobj, efivars_del_var);
+ kfree(efivars_new_var);
+ kfree(efivars_del_var);
+ kset_unregister(efivars_kset);
+}
+
+static int efivars_sysfs_init(void)
+{
+ struct kobject *parent_kobj = efivars_kobject();
+ int error = 0;
+
+ /* No efivars have been registered yet */
+ if (!parent_kobj || !efivar_supports_writes())
+ return 0;
+
+ printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ EFIVARS_DATE);
+
+ efivars_kset = kset_create_and_add("vars", NULL, parent_kobj);
+ if (!efivars_kset) {
+ printk(KERN_ERR "efivars: Subsystem registration failed.\n");
+ return -ENOMEM;
+ }
+
+ efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
+
+ error = create_efivars_bin_attributes();
+ if (error) {
+ efivars_sysfs_exit();
+ return error;
+ }
+
+ return 0;
+}
+
+module_init(efivars_sysfs_init);
+module_exit(efivars_sysfs_exit);
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
new file mode 100644
index 000000000..21ae0c482
--- /dev/null
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for extracting embedded firmware for peripherals from EFI code,
+ *
+ * Copyright (c) 2018 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/efi_embedded_fw.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <crypto/sha.h>
+
+/* Exported for use by lib/test_firmware.c only */
+LIST_HEAD(efi_embedded_fw_list);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+bool efi_embedded_fw_checked;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
+
+static const struct dmi_system_id * const embedded_fw_table[] = {
+#ifdef CONFIG_TOUCHSCREEN_DMI
+ touchscreen_dmi_table,
+#endif
+ NULL
+};
+
+/*
+ * Note the efi_check_for_embedded_firmwares() code currently makes the
+ * following 2 assumptions. These may need to be revisited if embedded firmware
+ * is found for which they do not hold:
+ * 1) The firmware is only found in EFI_BOOT_SERVICES_CODE memory segments
+ * 2) The firmware always starts at an offset which is a multiple of 8 bytes
+ */
+static int __init efi_check_md_for_embedded_firmware(
+ efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
+{
+ struct efi_embedded_fw *fw;
+ u8 hash[32];
+ u64 i, size;
+ u8 *map;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ map = memremap(md->phys_addr, size, MEMREMAP_WB);
+ if (!map) {
+ pr_err("Error mapping EFI mem at %#llx\n", md->phys_addr);
+ return -ENOMEM;
+ }
+
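+ /*
+ * Scan the segment in 8-byte steps: do the cheap prefix comparison
+ * first, then confirm a match by checking the SHA-256 of the full
+ * firmware image.
+ */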
+ for (i = 0; (i + desc->length) <= size; i += 8) {
+ if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
+ continue;
+
+ sha256(map + i, desc->length, hash);
+ if (memcmp(hash, desc->sha256, 32) == 0)
+ break;
+ }
+ if ((i + desc->length) > size) {
+ memunmap(map);
+ return -ENOENT;
+ }
+
+ pr_info("Found EFI embedded fw '%s'\n", desc->name);
+
+ fw = kmalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ memunmap(map);
+ return -ENOMEM;
+ }
+
+ fw->data = kmemdup(map + i, desc->length, GFP_KERNEL);
+ memunmap(map);
+ if (!fw->data) {
+ kfree(fw);
+ return -ENOMEM;
+ }
+
+ fw->name = desc->name;
+ fw->length = desc->length;
+ list_add(&fw->list, &efi_embedded_fw_list);
+
+ return 0;
+}
+
+void __init efi_check_for_embedded_firmwares(void)
+{
+ const struct efi_embedded_fw_desc *fw_desc;
+ const struct dmi_system_id *dmi_id;
+ efi_memory_desc_t *md;
+ int i, r;
+
+ for (i = 0; embedded_fw_table[i]; i++) {
+ dmi_id = dmi_first_match(embedded_fw_table[i]);
+ if (!dmi_id)
+ continue;
+
+ fw_desc = dmi_id->driver_data;
+
+ /*
+ * In some drivers the driver_data struct may contain other
+ * driver-specific data after the fw_desc struct, and the fw_desc
+ * struct itself may be empty; skip these.
+ */
+ if (!fw_desc->name)
+ continue;
+
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_BOOT_SERVICES_CODE)
+ continue;
+
+ r = efi_check_md_for_embedded_firmware(md, fw_desc);
+ if (r == 0)
+ break;
+ }
+ }
+
+ efi_embedded_fw_checked = true;
+}
+
+int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
+{
+ struct efi_embedded_fw *iter, *fw = NULL;
+
+ if (!efi_embedded_fw_checked) {
+ pr_warn("Warning %s called while we did not check for embedded fw\n",
+ __func__);
+ return -ENOENT;
+ }
+
+ list_for_each_entry(iter, &efi_embedded_fw_list, list) {
+ if (strcmp(name, iter->name) == 0) {
+ fw = iter;
+ break;
+ }
+ }
+
+ if (!fw)
+ return -ENOENT;
+
+ *data = fw->data;
+ *size = fw->length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_get_embedded_fw);
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
new file mode 100644
index 000000000..d59152721
--- /dev/null
+++ b/drivers/firmware/efi/esrt.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * esrt.c
+ *
+ * This module exports EFI System Resource Table (ESRT) entries into userspace
+ * through the sysfs file system. The ESRT provides a read-only catalog of
+ * system components for which the system accepts firmware upgrades via UEFI's
+ * "Capsule Update" feature. This module allows userland utilities to evaluate
+ * what firmware updates can be applied to this system, and potentially arrange
+ * for those updates to occur.
+ *
+ * Data is currently found below /sys/firmware/efi/esrt/...
+ */
+#define pr_fmt(fmt) "esrt: " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/early_ioremap.h>
+
+struct efi_system_resource_entry_v1 {
+ efi_guid_t fw_class;
+ u32 fw_type;
+ u32 fw_version;
+ u32 lowest_supported_fw_version;
+ u32 capsule_flags;
+ u32 last_attempt_version;
+ u32 last_attempt_status;
+};
+
+/*
+ * _count and _version are what they seem like. _max is actually just
+ * accounting info for the firmware when creating the table; it should never
+ * have been exposed to us. To wit, the spec says:
+ * The maximum number of resource array entries that can be within the
+ * table without reallocating the table, must not be zero.
+ * Since there's no guidance about what that means in terms of memory layout,
+ * it means nothing to us.
+ */
+struct efi_system_resource_table {
+ u32 fw_resource_count;
+ u32 fw_resource_count_max;
+ u64 fw_resource_version;
+ u8 entries[];
+};
+
+static phys_addr_t esrt_data;
+static size_t esrt_data_size;
+
+static struct efi_system_resource_table *esrt;
+
+struct esre_entry {
+ union {
+ struct efi_system_resource_entry_v1 *esre1;
+ } esre;
+
+ struct kobject kobj;
+ struct list_head list;
+};
+
+/* global list of esre_entry. */
+static LIST_HEAD(entry_list);
+
+/* entry attribute */
+struct esre_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct esre_entry *entry, char *buf);
+ ssize_t (*store)(struct esre_entry *entry,
+ const char *buf, size_t count);
+};
+
+static struct esre_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct esre_entry, kobj);
+}
+
+static struct esre_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct esre_attribute, attr);
+}
+
+static ssize_t esre_attr_show(struct kobject *kobj,
+ struct attribute *_attr, char *buf)
+{
+ struct esre_entry *entry = to_entry(kobj);
+ struct esre_attribute *attr = to_attr(_attr);
+
+ /* Don't tell normal users what firmware versions we've got... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops esre_attr_ops = {
+ .show = esre_attr_show,
+};
+
+/* Generic ESRT Entry ("ESRE") support. */
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
+{
+ char *str = buf;
+
+ efi_guid_to_str(&entry->esre.esre1->fw_class, str);
+ str += strlen(str);
+ str += sprintf(str, "\n");
+
+ return str - buf;
+}
+
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
+
+#define esre_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", \
+ le##size##_to_cpu(entry->esre.esre1->name)); \
+} \
+\
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
+
+esre_attr_decl(fw_type, 32, "%u");
+esre_attr_decl(fw_version, 32, "%u");
+esre_attr_decl(lowest_supported_fw_version, 32, "%u");
+esre_attr_decl(capsule_flags, 32, "0x%x");
+esre_attr_decl(last_attempt_version, 32, "%u");
+esre_attr_decl(last_attempt_status, 32, "%u");
+
+static struct attribute *esre1_attrs[] = {
+ &esre_fw_class.attr,
+ &esre_fw_type.attr,
+ &esre_fw_version.attr,
+ &esre_lowest_supported_fw_version.attr,
+ &esre_capsule_flags.attr,
+ &esre_last_attempt_version.attr,
+ &esre_last_attempt_status.attr,
+ NULL
+};
+static void esre_release(struct kobject *kobj)
+{
+ struct esre_entry *entry = to_entry(kobj);
+
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static struct kobj_type esre1_ktype = {
+ .release = esre_release,
+ .sysfs_ops = &esre_attr_ops,
+ .default_attrs = esre1_attrs,
+};
+
+
+static struct kobject *esrt_kobj;
+static struct kset *esrt_kset;
+
+static int esre_create_sysfs_entry(void *esre, int entry_num)
+{
+ struct esre_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->kobj.kset = esrt_kset;
+
+ if (esrt->fw_resource_version == 1) {
+ int rc = 0;
+
+ entry->esre.esre1 = esre;
+ rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
+ "entry%d", entry_num);
+ if (rc) {
+ kobject_put(&entry->kobj);
+ return rc;
+ }
+ }
+
+ list_add_tail(&entry->list, &entry_list);
+ return 0;
+}
+
+/* support for displaying ESRT fields at the top level */
+#define esrt_attr_decl(name, size, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
+{ \
+ return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
+} \
+\
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
+
+esrt_attr_decl(fw_resource_count, 32, "%u");
+esrt_attr_decl(fw_resource_count_max, 32, "%u");
+esrt_attr_decl(fw_resource_version, 64, "%llu");
+
+static struct attribute *esrt_attrs[] = {
+ &esrt_fw_resource_count.attr,
+ &esrt_fw_resource_count_max.attr,
+ &esrt_fw_resource_version.attr,
+ NULL,
+};
+
+static inline int esrt_table_exists(void)
+{
+ if (!efi_enabled(EFI_CONFIG_TABLES))
+ return 0;
+ if (efi.esrt == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return 1;
+}
+
+static umode_t esrt_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (!esrt_table_exists())
+ return 0;
+ return attr->mode;
+}
+
+static const struct attribute_group esrt_attr_group = {
+ .attrs = esrt_attrs,
+ .is_visible = esrt_attr_is_visible,
+};
+
+/*
+ * remap the table, validate it, mark it reserved and unmap it.
+ */
+void __init efi_esrt_init(void)
+{
+ void *va;
+ struct efi_system_resource_table tmpesrt;
+ size_t size, max, entry_size, entries_size;
+ efi_memory_desc_t md;
+ int rc;
+ phys_addr_t end;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ pr_debug("esrt-init: loading.\n");
+ if (!esrt_table_exists())
+ return;
+
+ rc = efi_mem_desc_lookup(efi.esrt, &md);
+ if (rc < 0 ||
+ (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+ md.type != EFI_BOOT_SERVICES_DATA &&
+ md.type != EFI_RUNTIME_SERVICES_DATA)) {
+ pr_warn("ESRT header is not in the memory map.\n");
+ return;
+ }
+
+ max = efi_mem_desc_end(&md);
+ if (max < efi.esrt) {
+ pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
+ (void *)efi.esrt, (void *)max);
+ return;
+ }
+
+ size = sizeof(*esrt);
+ max -= efi.esrt;
+
+ if (max < size) {
+ pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
+ size, max);
+ return;
+ }
+
+ va = early_memremap(efi.esrt, size);
+ if (!va) {
+ pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt,
+ size);
+ return;
+ }
+
+ memcpy(&tmpesrt, va, sizeof(tmpesrt));
+ early_memunmap(va, size);
+
+ if (tmpesrt.fw_resource_version != 1) {
+ pr_err("Unsupported ESRT version %lld.\n",
+ tmpesrt.fw_resource_version);
+ return;
+ }
+
+ entry_size = sizeof(struct efi_system_resource_entry_v1);
+ if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) {
+ pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n",
+ max - size, entry_size);
+ return;
+ }
+
+ /*
+ * The format doesn't really give us any boundary to test here,
+ * so I'm making up 128 as the max number of individually updatable
+ * components we support.
+ * 128 should be pretty excessive, but there's still some chance
+ * somebody will do that someday and we'll need to raise this.
+ */
+ if (tmpesrt.fw_resource_count > 128) {
+ pr_err("ESRT says fw_resource_count has very large value %d.\n",
+ tmpesrt.fw_resource_count);
+ return;
+ }
+
+ /*
+ * We know it can't be larger than N * sizeof() here, and N is limited
+ * by the previous test to a small number, so there's no overflow.
+ */
+ entries_size = tmpesrt.fw_resource_count * entry_size;
+ if (max < size + entries_size) {
+ pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n",
+ size, max);
+ return;
+ }
+
+ size += entries_size;
+
+ esrt_data = (phys_addr_t)efi.esrt;
+ esrt_data_size = size;
+
+ end = esrt_data + size;
+ pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(esrt_data, esrt_data_size);
+
+ pr_debug("esrt-init: loaded.\n");
+}
+
+static int __init register_entries(void)
+{
+ struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries;
+ int i, rc;
+
+ if (!esrt_table_exists())
+ return 0;
+
+ for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) {
+ void *esre = NULL;
+ if (esrt->fw_resource_version == 1) {
+ esre = &v1_entries[i];
+ } else {
+ pr_err("Unsupported ESRT version %lld.\n",
+ esrt->fw_resource_version);
+ return -EINVAL;
+ }
+
+ rc = esre_create_sysfs_entry(esre, i);
+ if (rc < 0) {
+ pr_err("ESRT entry creation failed with error %d.\n",
+ rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void cleanup_entry_list(void)
+{
+ struct esre_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &entry_list, list) {
+ kobject_put(&entry->kobj);
+ }
+}
+
+static int __init esrt_sysfs_init(void)
+{
+ int error;
+
+ pr_debug("esrt-sysfs: loading.\n");
+ if (!esrt_data || !esrt_data_size)
+ return -ENOSYS;
+
+ esrt = memremap(esrt_data, esrt_data_size, MEMREMAP_WB);
+ if (!esrt) {
+ pr_err("memremap(%pa, %zu) failed.\n", &esrt_data,
+ esrt_data_size);
+ return -ENOMEM;
+ }
+
+ esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
+ if (!esrt_kobj) {
+ pr_err("Firmware table registration failed.\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ error = sysfs_create_group(esrt_kobj, &esrt_attr_group);
+ if (error) {
+ pr_err("Sysfs attribute export failed with error %d.\n",
+ error);
+ goto err_remove_esrt;
+ }
+
+ esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj);
+ if (!esrt_kset) {
+ pr_err("kset creation failed.\n");
+ error = -ENOMEM;
+ goto err_remove_group;
+ }
+
+ error = register_entries();
+ if (error)
+ goto err_cleanup_list;
+
+ pr_debug("esrt-sysfs: loaded.\n");
+
+ return 0;
+err_cleanup_list:
+ cleanup_entry_list();
+ kset_unregister(esrt_kset);
+err_remove_group:
+ sysfs_remove_group(esrt_kobj, &esrt_attr_group);
+err_remove_esrt:
+ kobject_put(esrt_kobj);
+err:
+ memunmap(esrt);
+ esrt = NULL;
+ return error;
+}
+device_initcall(esrt_sysfs_init);
+
+/*
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
+MODULE_DESCRIPTION("EFI System Resource Table support");
+MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
new file mode 100644
index 000000000..6e0f34a38
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fake_mem.c
+ *
+ * Copyright (C) 2015 FUJITSU LIMITED
+ * Author: Taku Izumi <izumi.taku@jp.fujitsu.com>
+ *
+ * This code introduces a new boot option named "efi_fake_mem".
+ * By specifying this parameter, you can add an arbitrary attribute to a
+ * specific memory range by updating the original (firmware-provided) EFI
+ * memmap.
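+ *
+ * For example, "efi_fake_mem=2G@4G:0x10000" marks the 2G of memory
+ * starting at 4G with attribute 0x10000 (EFI_MEMORY_MORE_RELIABLE).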
+ */
+
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
+#include <linux/sort.h>
+#include "fake_mem.h"
+
+struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+int nr_fake_mem;
+
+static int __init cmp_fake_mem(const void *x1, const void *x2)
+{
+ const struct efi_mem_range *m1 = x1;
+ const struct efi_mem_range *m2 = x2;
+
+ if (m1->range.start < m2->range.start)
+ return -1;
+ if (m1->range.start > m2->range.start)
+ return 1;
+ return 0;
+}
+
+static void __init efi_fake_range(struct efi_mem_range *efi_range)
+{
+ struct efi_memory_map_data data = { 0 };
+ int new_nr_map = efi.memmap.nr_map;
+ efi_memory_desc_t *md;
+ void *new_memmap;
+
+ /* count up the number of EFI memory descriptors */
+ for_each_efi_memory_desc(md)
+ new_nr_map += efi_memmap_split_count(md, &efi_range->range);
+
+ /* allocate memory for new EFI memmap */
+ if (efi_memmap_alloc(new_nr_map, &data) != 0)
+ return;
+
+ /* create new EFI memmap */
+ new_memmap = early_memremap(data.phys_map, data.size);
+ if (!new_memmap) {
+ __efi_memmap_free(data.phys_map, data.size, data.flags);
+ return;
+ }
+
+ efi_memmap_insert(&efi.memmap, new_memmap, efi_range);
+
+ /* swap into new EFI memmap */
+ early_memunmap(new_memmap, data.size);
+
+ efi_memmap_install(&data);
+}
+
+void __init efi_fake_memmap(void)
+{
+ int i;
+
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ for (i = 0; i < nr_fake_mem; i++)
+ efi_fake_range(&efi_fake_mems[i]);
+
+ /* print new EFI memmap */
+ efi_print_memmap();
+}
+
+static int __init setup_fake_mem(char *p)
+{
+ u64 start = 0, mem_size = 0, attribute = 0;
+ int i;
+
+ if (!p)
+ return -EINVAL;
+
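+ /* Each comma-separated entry has the form <size>@<start>:<attribute>. */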
+ while (*p != '\0') {
+ mem_size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p+1, &p);
+ else
+ break;
+
+ if (*p == ':')
+ attribute = simple_strtoull(p+1, &p, 0);
+ else
+ break;
+
+ if (nr_fake_mem >= EFI_MAX_FAKEMEM)
+ break;
+
+ efi_fake_mems[nr_fake_mem].range.start = start;
+ efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+ efi_fake_mems[nr_fake_mem].attribute = attribute;
+ nr_fake_mem++;
+
+ if (*p == ',')
+ p++;
+ }
+
+ sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+ cmp_fake_mem, NULL);
+
+ for (i = 0; i < nr_fake_mem; i++)
+ pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
+ efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
+ efi_fake_mems[i].range.end);
+
+ return *p == '\0' ? 0 : -EINVAL;
+}
+
+early_param("efi_fake_mem", setup_fake_mem);
diff --git a/drivers/firmware/efi/fake_mem.h b/drivers/firmware/efi/fake_mem.h
new file mode 100644
index 000000000..d52791af4
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __EFI_FAKE_MEM_H__
+#define __EFI_FAKE_MEM_H__
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+extern struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+extern int nr_fake_mem;
+#endif /* __EFI_FAKE_MEM_H__ */
diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
new file mode 100644
index 000000000..e901f8564
--- /dev/null
+++ b/drivers/firmware/efi/fdtparams.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+
+#include <asm/unaligned.h>
+
+enum {
+ SYSTAB,
+ MMBASE,
+ MMSIZE,
+ DCSIZE,
+ DCVERS,
+
+ PARAMCOUNT
+};
+
+static __initconst const char name[][22] = {
+ [SYSTAB] = "System Table ",
+ [MMBASE] = "MemMap Address ",
+ [MMSIZE] = "MemMap Size ",
+ [DCSIZE] = "MemMap Desc. Size ",
+ [DCVERS] = "MemMap Desc. Version ",
+};
+
+static __initconst const struct {
+ const char path[17];
+ const char params[PARAMCOUNT][26];
+} dt_params[] = {
+ {
+#ifdef CONFIG_XEN // <-------17------>
+ .path = "/hypervisor/uefi",
+ .params = {
+ [SYSTAB] = "xen,uefi-system-table",
+ [MMBASE] = "xen,uefi-mmap-start",
+ [MMSIZE] = "xen,uefi-mmap-size",
+ [DCSIZE] = "xen,uefi-mmap-desc-size",
+ [DCVERS] = "xen,uefi-mmap-desc-ver",
+ }
+ }, {
+#endif
+ .path = "/chosen",
+ .params = { // <-----------26----------->
+ [SYSTAB] = "linux,uefi-system-table",
+ [MMBASE] = "linux,uefi-mmap-start",
+ [MMSIZE] = "linux,uefi-mmap-size",
+ [DCSIZE] = "linux,uefi-mmap-desc-size",
+ [DCVERS] = "linux,uefi-mmap-desc-ver",
+ }
+ }
+};
+
+static int __init efi_get_fdt_prop(const void *fdt, int node, const char *pname,
+ const char *rname, void *var, int size)
+{
+ const void *prop;
+ int len;
+ u64 val;
+
+ prop = fdt_getprop(fdt, node, pname, &len);
+ if (!prop)
+ return 1;
+
+ val = (len == 4) ? (u64)be32_to_cpup(prop) : get_unaligned_be64(prop);
+
+ if (size == 8)
+ *(u64 *)var = val;
+ else
+ *(u32 *)var = (val < U32_MAX) ? val : U32_MAX; // saturate
+
+ if (efi_enabled(EFI_DBG))
+ pr_info(" %s: 0x%0*llx\n", rname, size * 2, val);
+
+ return 0;
+}
+
+u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+{
+ const void *fdt = initial_boot_params;
+ unsigned long systab;
+ int i, j, node;
+ struct {
+ void *var;
+ int size;
+ } target[] = {
+ [SYSTAB] = { &systab, sizeof(systab) },
+ [MMBASE] = { &mm->phys_map, sizeof(mm->phys_map) },
+ [MMSIZE] = { &mm->size, sizeof(mm->size) },
+ [DCSIZE] = { &mm->desc_size, sizeof(mm->desc_size) },
+ [DCVERS] = { &mm->desc_version, sizeof(mm->desc_version) },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+
+ if (!fdt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ node = fdt_path_offset(fdt, dt_params[i].path);
+ if (node < 0)
+ continue;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Getting UEFI parameters from %s in DT:\n",
+ dt_params[i].path);
+
+ for (j = 0; j < ARRAY_SIZE(target); j++) {
+ const char *pname = dt_params[i].params[j];
+
+ if (!efi_get_fdt_prop(fdt, node, pname, name[j],
+ target[j].var, target[j].size))
+ continue;
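+ /*
+ * The system table property is mandatory; if it is missing,
+ * assume this system did not boot via UEFI.
+ */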
+ if (!j)
+ goto notfound;
+ pr_err("Can't find property '%s' in DT!\n", pname);
+ return 0;
+ }
+ return systab;
+ }
+notfound:
+ pr_info("UEFI not found.\n");
+ return 0;
+}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
new file mode 100644
index 000000000..433e11dab
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# The stub may be linked into the kernel proper or into a separate boot binary,
+# but in either case, it executes before the kernel does (with MMU disabled) so
+# things like ftrace and stack-protector are likely to cause trouble if left
+# enabled, even if doing so doesn't break the build.
+#
+cflags-$(CONFIG_X86_32) := -march=i386
+cflags-$(CONFIG_X86_64) := -mcmodel=small
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+ -fPIC -fno-strict-aliasing -mno-red-zone \
+ -mno-mmx -mno-sse -fshort-wchar \
+ -Wno-pointer-sign \
+ $(call cc-disable-warning, address-of-packed-member) \
+ $(call cc-disable-warning, gnu) \
+ -fno-asynchronous-unwind-tables \
+ $(CLANG_FLAGS)
+
+# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+# disable the stackleak plugin
+cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+ $(call cc-option,-mbranch-protection=none)
+cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fno-builtin -fpic \
+ $(call cc-option,-mno-single-pic-base)
+cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpic
+
+cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
+
+KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
+ -include $(srctree)/include/linux/hidden.h \
+ -D__NO_FORTIFY \
+ -ffreestanding \
+ -fno-stack-protector \
+ $(call cc-option,-fno-addrsig) \
+ -D__DISABLE_EXPORTS
+
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
+# remove SCS flags from all objects in this directory
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+
+GCOV_PROFILE := n
+# Sanitizer runtimes are unavailable and cannot be linked here.
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
+ file.o mem.o random.o randomalloc.o pci.o \
+ skip_spaces.o lib-cmdline.o lib-ctype.o \
+ alignedmem.o relocate.o vsprintf.o
+
+# include the stub's generic dependencies from lib/ when building for ARM/arm64
+efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+
+$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
+ $(patsubst %.c,lib-%.o,$(efi-deps-y))
+
+lib-$(CONFIG_ARM) += arm32-stub.o
+lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_X86) += x86-stub.o
+lib-$(CONFIG_RISCV) += riscv-stub.o
+CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+# Even when -mbranch-protection=none is set, Clang will generate a
+# .note.gnu.property for code-less object files (like lib/ctype.c),
+# so work around this by explicitly removing the unwanted section.
+# https://bugs.llvm.org/show_bug.cgi?id=46480
+STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
+
+#
+# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
+# .bss section, so the .bss section of the EFI stub needs to be included in the
+# .data section of the compressed kernel to ensure initialization. Rename the
+# .bss section here so it's easy to pick out in the linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
+STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub and .bss to .bss.efistub,
+# which are preserved explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ --rename-section .bss=.bss.efistub,load,alloc
+STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+
+#
+# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
+# code indefinitely unless it is annotated as __init/__initdata/__initconst etc.
+# So let's apply the __init annotations at the section level, by prefixing
+# the section names directly. This will ensure that even all the inline string
+# literals are covered.
+# The fact that the stub and the kernel proper are essentially the same binary
+# also means that we need to be extra careful to make sure that the stub does
+# not rely on any absolute symbol references, considering that the virtual
+# kernel mapping that the linker uses is not active yet when the stub is
+# executing. So build all C dependencies of the EFI stub into libstub, and do
+# a verification pass to see if any absolute relocations exist in any of the
+# object files.
+#
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
+
+STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
+
+# For RISC-V, we don't need anything special beyond what arm64 does. Keep all
+# the symbols in the .init section and make sure that no absolute symbol
+# references exist.
+STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+ $(call if_changed,stubcopy)
+
+#
+# Strip debug sections and some other sections that may legally contain
+# absolute relocations, so that we can inspect the remaining sections for
+# such relocations. If none are found, regenerate the output object, but
+# this time, use objcopy and leave all sections in place.
+#
+quiet_cmd_stubcopy = STUBCPY $@
+ cmd_stubcopy = \
+ $(STRIP) --strip-debug -o $@ $<; \
+ if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \
+ echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+ /bin/false; \
+ fi; \
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
new file mode 100644
index 000000000..1de9878dd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_allocate_pages_aligned() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ * @align: minimum alignment of the base of the allocation
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
+ * not exceed the address given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+ int slack;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ alloc_addr = ALIGN_DOWN(max + 1, align) - 1;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ slack = align / EFI_PAGE_SIZE - 1;
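+ /*
+ * e.g. with 4 KiB EFI pages and a 64 KiB @align, 15 pages of slack are
+ * requested so that an aligned base is guaranteed to fall inside the
+ * allocation; the unused head and tail pages are freed again below.
+ */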
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = ALIGN((unsigned long)alloc_addr, align);
+
+ if (slack > 0) {
+ int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;
+
+ if (l) {
+ efi_bs_call(free_pages, alloc_addr, slack - l + 1);
+ slack = l - 1;
+ }
+ if (slack)
+ efi_bs_call(free_pages, *addr + size, slack);
+ }
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
new file mode 100644
index 000000000..4b5b2403b
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+ asm("mrs %0, cpsr" : "=r"(*cpsr));
+ if ((*cpsr & MODE_MASK) == HYP_MODE)
+ asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+ else
+ asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
+efi_status_t check_platform_features(void)
+{
+ efi_status_t status;
+ u32 cpsr, sctlr;
+ int block;
+
+ get_cpu_state(&cpsr, &sctlr);
+
+ efi_info("Entering in %s mode with MMU %sabled\n",
+ ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+ (sctlr & 1) ? "en" : "dis");
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*efi_entry_state),
+ (void **)&efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("allocate_pool() failed\n");
+ return status;
+ }
+
+ efi_entry_state->cpsr_before_ebs = cpsr;
+ efi_entry_state->sctlr_before_ebs = sctlr;
+
+ status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+ efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("install_configuration_table() failed\n");
+ goto free_state;
+ }
+
+ /* non-LPAE kernels can run anywhere */
+ if (!IS_ENABLED(CONFIG_ARM_LPAE))
+ return EFI_SUCCESS;
+
+ /* LPAE kernels need compatible hardware */
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block < 5) {
+ efi_err("This LPAE kernel is not supported by your CPU\n");
+ status = EFI_UNSUPPORTED;
+ goto drop_table;
+ }
+ return EFI_SUCCESS;
+
+drop_table:
+ efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+ efi_bs_call(free_pool, efi_entry_state);
+ return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+ get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+ &efi_entry_state->sctlr_after_ebs);
+}
+
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(void)
+{
+ struct screen_info *si;
+ efi_status_t status;
+
+ /*
+ * Unlike on arm64, where we can directly fill out the screen_info
+ * structure from the stub, we need to allocate a buffer to hold
+ * its contents while we hand over to the kernel proper from the
+ * decompressor.
+ */
+ status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
+
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ status = efi_bs_call(install_configuration_table,
+ &screen_info_guid, si);
+ if (status == EFI_SUCCESS)
+ return si;
+
+ efi_bs_call(free_pool, si);
+ return NULL;
+}
+
+void free_screen_info(struct screen_info *si)
+{
+ if (!si)
+ return;
+
+ efi_bs_call(install_configuration_table, &screen_info_guid, NULL);
+ efi_bs_call(free_pool, si);
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image)
+{
+ const int slack = TEXT_OFFSET - 5 * PAGE_SIZE;
+ int alloc_size = MAX_UNCOMP_KERNEL_SIZE + EFI_PHYS_ALIGN;
+ unsigned long alloc_base, kernel_base;
+ efi_status_t status;
+
+ /*
+ * Allocate space for the decompressed kernel as low as possible.
+ * The region should be 16 MiB aligned, but the first 'slack' bytes
+ * are not used by Linux, so we allow those to be occupied by the
+ * firmware.
+ */
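+ /*
+ * With the usual ARM TEXT_OFFSET of 0x8000 and 4 KiB pages, 'slack' works
+ * out to 12 KiB at the bottom of the 16 MiB aligned region.
+ */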
+ status = efi_low_alloc_above(alloc_size, EFI_PAGE_SIZE, &alloc_base, 0x0);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for uncompressed kernel.\n");
+ return status;
+ }
+
+ if ((alloc_base % EFI_PHYS_ALIGN) > slack) {
+ /*
+ * More than 'slack' bytes are already occupied at the base of
+ * the allocation, so we need to advance to the next 16 MiB block.
+ */
+ kernel_base = round_up(alloc_base, EFI_PHYS_ALIGN);
+ efi_info("Free memory starts at 0x%lx, setting kernel_base to 0x%lx\n",
+ alloc_base, kernel_base);
+ } else {
+ kernel_base = round_down(alloc_base, EFI_PHYS_ALIGN);
+ }
+
+ *reserve_addr = kernel_base + slack;
+ *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
+
+ /* now free the parts that we will not use */
+ if (*reserve_addr > alloc_base) {
+ efi_bs_call(free_pages, alloc_base,
+ (*reserve_addr - alloc_base) / EFI_PAGE_SIZE);
+ alloc_size -= *reserve_addr - alloc_base;
+ }
+ efi_bs_call(free_pages, *reserve_addr + MAX_UNCOMP_KERNEL_SIZE,
+ (alloc_size - MAX_UNCOMP_KERNEL_SIZE) / EFI_PAGE_SIZE);
+
+ *image_addr = kernel_base + TEXT_OFFSET;
+ *image_size = 0;
+
+ efi_debug("image addr == 0x%lx, reserve_addr == 0x%lx\n",
+ *image_addr, *reserve_addr);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
new file mode 100644
index 000000000..7f4bafcd9
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ */
+
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/memory.h>
+#include <asm/sections.h>
+#include <asm/sysreg.h>
+
+#include "efistub.h"
+
+efi_status_t check_platform_features(void)
+{
+ u64 tg;
+
+ /* UEFI mandates support for 4 KB granularity, no need to check */
+ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return EFI_SUCCESS;
+
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+ if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ efi_err("This 64 KB granular kernel is not supported by your CPU\n");
+ else
+ efi_err("This 16 KB granular kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+/*
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
+ */
+static bool check_image_region(u64 base, u64 size)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *memory_map;
+ struct efi_boot_memmap map;
+ efi_status_t status;
+ bool ret = false;
+ int map_offset;
+
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&map);
+ if (status != EFI_SUCCESS)
+ return false;
+
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+ /*
+ * Find the region that covers base, and return whether
+ * it covers base+size bytes.
+ */
+ if (base >= md->phys_addr && base < end) {
+ ret = (base + size) <= end;
+ break;
+ }
+ }
+
+ efi_bs_call(free_pool, memory_map);
+
+ return ret;
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image)
+{
+ efi_status_t status;
+ unsigned long kernel_size, kernel_memsize = 0;
+ u32 phys_seed = 0;
+
+ /*
+ * Although relocatable kernels can fix up the misalignment with
+ * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+ * subtly out of sync with those recorded in the vmlinux when kaslr is
+ * disabled but the image required relocation anyway. Therefore retain
+ * 2M alignment if KASLR was explicitly disabled, even if it was not
+ * going to be activated to begin with.
+ */
+ u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ if (!efi_nokaslr) {
+ status = efi_get_random_bytes(sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_NOT_FOUND) {
+ efi_info("EFI_RNG_PROTOCOL unavailable, KASLR will be disabled\n");
+ efi_nokaslr = true;
+ } else if (status != EFI_SUCCESS) {
+ efi_err("efi_get_random_bytes() failed (0x%lx), KASLR will be disabled\n",
+ status);
+ efi_nokaslr = true;
+ }
+ } else {
+ efi_info("KASLR disabled on kernel command line\n");
+ }
+ }
+
+ if (image->image_base != _text)
+ efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+
+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+ SEGMENT_ALIGN >> 10);
+
+ kernel_size = _edata - _text;
+ kernel_memsize = kernel_size + (_end - _edata);
+ *reserve_size = kernel_memsize;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ status = efi_random_alloc(*reserve_size, min_kimg_align,
+ reserve_addr, phys_seed);
+ } else {
+ status = EFI_OUT_OF_RESOURCES;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (!check_image_region((u64)_text, kernel_memsize)) {
+ efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+ } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
+ /*
+ * Just execute from wherever we were loaded by the
+ * UEFI PE/COFF loader if the alignment is suitable.
+ */
+ *image_addr = (u64)_text;
+ *reserve_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
+ ULONG_MAX, min_kimg_align);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
+ }
+ }
+
+ *image_addr = *reserve_addr;
+ memcpy((void *)*image_addr, _text, kernel_size);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
new file mode 100644
index 000000000..aa8da0a49
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ */
+
+#include <stdarg.h>
+
+#include <linux/ctype.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+#include "efistub.h"
+
+bool efi_nochunk;
+bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+bool efi_noinitrd;
+int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+bool efi_novamap;
+
+static bool efi_nosoftreserve;
+static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_nosoftreserve;
+}
+
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str: UCS-2 encoded string
+ */
+void efi_char16_puts(efi_char16_t *str)
+{
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
+}
+
+static
+u32 utf8_to_utf32(const u8 **s8)
+{
+ u32 c32;
+ u8 c0, cx;
+ size_t clen, i;
+
+ c0 = cx = *(*s8)++;
+ /*
+ * The position of the most-significant 0 bit gives us the length of
+ * a multi-octet encoding.
+ */
+ for (clen = 0; cx & 0x80; ++clen)
+ cx <<= 1;
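+ /* e.g. c0 == 0xe2 (binary 1110 0010) gives clen == 3: a three-octet sequence */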
+ /*
+ * If the 0 bit is in position 8, this is a valid single-octet
+ * encoding. If the 0 bit is in position 7 or positions 1-3, the
+ * encoding is invalid.
+ * In either case, we just return the first octet.
+ */
+ if (clen < 2 || clen > 4)
+ return c0;
+ /* Get the bits from the first octet. */
+ c32 = cx >> clen--;
+ for (i = 0; i < clen; ++i) {
+ /* Trailing octets must have 10 in most significant bits. */
+ cx = (*s8)[i] ^ 0x80;
+ if (cx & 0xc0)
+ return c0;
+ c32 = (c32 << 6) | cx;
+ }
+ /*
+ * Check for validity:
+ * - The character must be in the Unicode range.
+ * - It must not be a surrogate.
+ * - It must be encoded using the correct number of octets.
+ */
+ if (c32 > 0x10ffff ||
+ (c32 & 0xf800) == 0xd800 ||
+ clen != (c32 >= 0x80) + (c32 >= 0x800) + (c32 >= 0x10000))
+ return c0;
+ *s8 += clen;
+ return c32;
+}
+
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str: UTF-8 encoded string
+ */
+void efi_puts(const char *str)
+{
+ efi_char16_t buf[128];
+ size_t pos = 0, lim = ARRAY_SIZE(buf);
+ const u8 *s8 = (const u8 *)str;
+ u32 c32;
+
+ while (*s8) {
+ if (*s8 == '\n')
+ buf[pos++] = L'\r';
+ c32 = utf8_to_utf32(&s8);
+ if (c32 < 0x10000) {
+ /* Characters in plane 0 use a single word. */
+ buf[pos++] = c32;
+ } else {
+ /*
+ * Characters in other planes encode into a surrogate
+ * pair.
+ */
+ buf[pos++] = (0xd800 - (0x10000 >> 10)) + (c32 >> 10);
+ buf[pos++] = 0xdc00 + (c32 & 0x3ff);
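+ /* e.g. U+1F600 becomes the surrogate pair 0xd83d, 0xde00 */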
+ }
+ if (*s8 == '\0' || pos >= lim - 2) {
+ buf[pos] = L'\0';
+ efi_char16_puts(buf);
+ pos = 0;
+ }
+ }
+}
+
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt: format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less than the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return: number of printed characters
+ */
+int efi_printk(const char *fmt, ...)
+{
+ char printf_buf[256];
+ va_list args;
+ int printed;
+ int loglevel = printk_get_level(fmt);
+
+ switch (loglevel) {
+ case '0' ... '9':
+ loglevel -= '0';
+ break;
+ default:
+ /*
+ * Use loglevel -1 for cases where we just want to print to
+ * the screen.
+ */
+ loglevel = -1;
+ break;
+ }
+
+ if (loglevel >= efi_loglevel)
+ return 0;
+
+ if (loglevel >= 0)
+ efi_puts("EFI stub: ");
+
+ fmt = printk_skip_level(fmt);
+
+ va_start(args, fmt);
+ printed = vsnprintf(printf_buf, sizeof(printf_buf), fmt, args);
+ va_end(args);
+
+ efi_puts(printf_buf);
+ if (printed >= sizeof(printf_buf)) {
+ efi_puts("[Message truncated]\n");
+ return -1;
+ }
+
+ return printed;
+}
+
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline: kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
+ * option, e.g. efi=nochunk.
+ *
+ * It should be noted that efi= is parsed in two very different
+ * environments, first in the early boot environment of the EFI boot
+ * stub, and subsequently during the kernel boot.
+ *
+ * Return: status code
+ */
+efi_status_t efi_parse_options(char const *cmdline)
+{
+ size_t len;
+ efi_status_t status;
+ char *str, *buf;
+
+ if (!cmdline)
+ return EFI_SUCCESS;
+
+ len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1;
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ memcpy(buf, cmdline, len - 1);
+ buf[len - 1] = '\0';
+ str = skip_spaces(buf);
+
+ while (*str) {
+ char *param, *val;
+
+ str = next_arg(str, &param, &val);
+ if (!val && !strcmp(param, "--"))
+ break;
+
+ if (!strcmp(param, "nokaslr")) {
+ efi_nokaslr = true;
+ } else if (!strcmp(param, "quiet")) {
+ efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ } else if (!strcmp(param, "noinitrd")) {
+ efi_noinitrd = true;
+ } else if (!strcmp(param, "efi") && val) {
+ efi_nochunk = parse_option_str(val, "nochunk");
+ efi_novamap = parse_option_str(val, "novamap");
+
+ efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ parse_option_str(val, "nosoftreserve");
+
+ if (parse_option_str(val, "disable_early_pci_dma"))
+ efi_disable_pci_dma = true;
+ if (parse_option_str(val, "no_disable_early_pci_dma"))
+ efi_disable_pci_dma = false;
+ if (parse_option_str(val, "debug"))
+ efi_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ } else if (!strcmp(param, "video") &&
+ val && strstarts(val, "efifb:")) {
+ efi_parse_option_graphics(val + strlen("efifb:"));
+ }
+ }
+ efi_bs_call(free_pool, buf);
+ return EFI_SUCCESS;
+}
+
+/*
+ * The EFI_LOAD_OPTION descriptor has the following layout:
+ * u32 Attributes;
+ * u16 FilePathListLength;
+ * u16 Description[];
+ * efi_device_path_protocol_t FilePathList[];
+ * u8 OptionalData[];
+ *
+ * This function validates and unpacks the variable-size data fields.
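+ * (The Description is a NUL-terminated UCS-2 string, e.g. L"Fedora", and the
+ * FilePathList is terminated by an END_ENTIRE device path node; the scanning
+ * loops below rely on both terminators.)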
+ */
+static
+bool efi_load_option_unpack(efi_load_option_unpacked_t *dest,
+ const efi_load_option_t *src, size_t size)
+{
+ const void *pos;
+ u16 c;
+ efi_device_path_protocol_t header;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+
+ if (size < offsetof(efi_load_option_t, variable_data))
+ return false;
+ pos = src->variable_data;
+ size -= offsetof(efi_load_option_t, variable_data);
+
+ if ((src->attributes & ~EFI_LOAD_OPTION_MASK) != 0)
+ return false;
+
+ /* Scan description. */
+ description = pos;
+ do {
+ if (size < sizeof(c))
+ return false;
+ c = *(const u16 *)pos;
+ pos += sizeof(c);
+ size -= sizeof(c);
+ } while (c != L'\0');
+
+ /* Scan file_path_list. */
+ file_path_list = pos;
+ do {
+ if (size < sizeof(header))
+ return false;
+ header = *(const efi_device_path_protocol_t *)pos;
+ if (header.length < sizeof(header))
+ return false;
+ if (size < header.length)
+ return false;
+ pos += header.length;
+ size -= header.length;
+ } while ((header.type != EFI_DEV_END_PATH && header.type != EFI_DEV_END_PATH2) ||
+ (header.sub_type != EFI_DEV_END_ENTIRE));
+ if (pos != (const void *)file_path_list + src->file_path_list_length)
+ return false;
+
+ dest->attributes = src->attributes;
+ dest->file_path_list_length = src->file_path_list_length;
+ dest->description = description;
+ dest->file_path_list = file_path_list;
+ dest->optional_data_size = size;
+ dest->optional_data = size ? pos : NULL;
+
+ return true;
+}
+
+/*
+ * At least some versions of Dell firmware pass the entire contents of the
+ * Boot#### variable, i.e. the EFI_LOAD_OPTION descriptor, rather than just the
+ * OptionalData field.
+ *
+ * Detect this case and extract OptionalData.
+ */
+void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size)
+{
+ const efi_load_option_t *load_option = *load_options;
+ efi_load_option_unpacked_t load_option_unpacked;
+
+ if (!IS_ENABLED(CONFIG_X86))
+ return;
+ if (!load_option)
+ return;
+ if (*load_options_size < sizeof(*load_option))
+ return;
+ if ((load_option->attributes & ~EFI_LOAD_OPTION_BOOT_MASK) != 0)
+ return;
+
+ if (!efi_load_option_unpack(&load_option_unpacked, load_option, *load_options_size))
+ return;
+
+ efi_warn_once(FW_BUG "LoadOptions is an EFI_LOAD_OPTION descriptor\n");
+ efi_warn_once(FW_BUG "Using OptionalData as a workaround\n");
+
+ *load_options = load_option_unpacked.optional_data;
+ *load_options_size = load_option_unpacked.optional_data_size;
+}
+
+/*
+ * Convert the UTF-16 encoded UEFI command line to UTF-8 to pass to the kernel.
+ * The size of the allocated memory is returned in *cmd_line_len.
+ * Returns NULL on error.
+ */
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
+{
+ const u16 *s2;
+ unsigned long cmdline_addr = 0;
+ int options_chars = efi_table_attr(image, load_options_size);
+ const u16 *options = efi_table_attr(image, load_options);
+ int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ bool in_quote = false;
+ efi_status_t status;
+
+ efi_apply_loadoptions_quirk((const void **)&options, &options_chars);
+ options_chars /= sizeof(*options);
+
+ if (options) {
+ s2 = options;
+ while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
+ u16 c = *s2++;
+
+ if (c < 0x80) {
+ if (c == L'\0' || c == L'\n')
+ break;
+ if (c == L'"')
+ in_quote = !in_quote;
+ else if (!in_quote && isspace((char)c))
+ safe_options_bytes = options_bytes;
+
+ options_bytes++;
+ continue;
+ }
+
+ /*
+ * Get the number of UTF-8 bytes corresponding to a
+ * UTF-16 character.
+ * The first part handles everything in the BMP.
+ */
+ options_bytes += 2 + (c >= 0x800);
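+ /* e.g. U+00E9 takes two UTF-8 bytes, U+20AC takes three */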
+ /*
+ * Add one more byte for valid surrogate pairs. Invalid
+ * surrogates will be replaced with 0xfffd and take up
+ * only 3 bytes.
+ */
+ if ((c & 0xfc00) == 0xd800) {
+ /*
+ * If the very last word is a high surrogate,
+ * we must ignore it since we can't access the
+ * low surrogate.
+ */
+ if (!options_chars) {
+ options_bytes -= 3;
+ } else if ((*s2 & 0xfc00) == 0xdc00) {
+ options_bytes++;
+ options_chars--;
+ s2++;
+ }
+ }
+ }
+ if (options_bytes >= COMMAND_LINE_SIZE) {
+ options_bytes = safe_options_bytes;
+ efi_err("Command line is too long: truncated to %d bytes\n",
+ options_bytes);
+ }
+ }
+
+ options_bytes++; /* NUL termination */
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, options_bytes,
+ (void **)&cmdline_addr);
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
+ options_bytes - 1, options);
+
+ *cmd_line_len = options_bytes;
+ return (char *)cmdline_addr;
+}
+
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle: handle of the exiting image
+ * @map: pointer to receive the memory map
+ * @priv: argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A client
+ * specific structure may be passed to the function via priv. The client
+ * function may be called multiple times.
+ *
+ * Return: status code
+ */
+efi_status_t efi_exit_boot_services(void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func)
+{
+ efi_status_t status;
+
+ status = efi_get_memory_map(map);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(map, priv);
+ if (status != EFI_SUCCESS)
+ goto free_map;
+
+ if (efi_disable_pci_dma)
+ efi_pci_disable_bridge_busmaster();
+
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+
+ if (status == EFI_INVALID_PARAMETER) {
+ /*
+ * The memory map changed between efi_get_memory_map() and
+ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+ * updated map, and try again. The spec implies one retry
+ * should be sufficient, which is confirmed against the EDK2
+ * implementation. Per the spec, we can only invoke
+ * get_memory_map() and exit_boot_services() - we cannot alloc
+ * so efi_get_memory_map() cannot be used, and we must reuse
+ * the buffer. For all practical purposes, the headroom in the
+ * buffer should account for any changes in the map so the call
+ * to get_memory_map() is expected to succeed here.
+ */
+ *map->map_size = *map->buff_size;
+ status = efi_bs_call(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(map, priv);
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+ }
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ return EFI_SUCCESS;
+
+free_map:
+ efi_bs_call(free_pool, *map->map);
+fail:
+ return status;
+}
+
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid: GUID of the configuration table to be retrieved
+ * Return: pointer to the configuration table or NULL
+ */
+void *get_efi_config_table(efi_guid_t guid)
+{
+ unsigned long tables = efi_table_attr(efi_system_table, tables);
+ int nr_tables = efi_table_attr(efi_system_table, nr_tables);
+ int i;
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_config_table_t *t = (void *)tables;
+
+ if (efi_guidcmp(t->guid, guid) == 0)
+ return efi_table_attr(t, table);
+
+ tables += efi_is_native() ? sizeof(efi_config_table_t)
+ : sizeof(efi_config_table_32_t);
+ }
+ return NULL;
+}
+
+/*
+ * The LINUX_EFI_INITRD_MEDIA_GUID vendor media device path below provides a way
+ * for the firmware or bootloader to expose the initrd data directly to the stub
+ * via the trivial LoadFile2 protocol, which is defined in the UEFI spec, and is
+ * very easy to implement. It is a simple Linux initrd specific conduit between
+ * kernel and firmware, allowing us to put the EFI stub (being part of the
+ * kernel) in charge of where and when to load the initrd, while leaving it up
+ * to the firmware to decide whether it needs to expose its filesystem hierarchy
+ * via EFI protocols.
+ */
+static const struct {
+ struct efi_vendor_dev_path vendor;
+ struct efi_generic_dev_path end;
+} __packed initrd_dev_path = {
+ {
+ {
+ EFI_DEV_MEDIA,
+ EFI_DEV_MEDIA_VENDOR,
+ sizeof(struct efi_vendor_dev_path),
+ },
+ LINUX_EFI_INITRD_MEDIA_GUID
+ }, {
+ EFI_DEV_END_PATH,
+ EFI_DEV_END_ENTIRE,
+ sizeof(struct efi_generic_dev_path)
+ }
+};
+
+/**
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
+ * @load_addr: pointer to store the address where the initrd was loaded
+ * @load_size: pointer to store the size of the loaded initrd
+ * @max: upper limit for the initrd memory allocation
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ * case @load_addr and @load_size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
+ */
+static
+efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long max)
+{
+ efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID;
+ efi_device_path_protocol_t *dp;
+ efi_load_file2_protocol_t *lf2;
+ unsigned long initrd_addr;
+ unsigned long initrd_size;
+ efi_handle_t handle;
+ efi_status_t status;
+
+ dp = (efi_device_path_protocol_t *)&initrd_dev_path;
+ status = efi_bs_call(locate_device_path, &lf2_proto_guid, &dp, &handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(handle_protocol, handle, &lf2_proto_guid,
+ (void **)&lf2);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ status = efi_allocate_pages(initrd_size, &initrd_addr, max);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd_size,
+ (void *)initrd_addr);
+ if (status != EFI_SUCCESS) {
+ efi_free(initrd_size, initrd_addr);
+ return EFI_LOAD_ERROR;
+ }
+
+ *load_addr = initrd_addr;
+ *load_size = initrd_size;
+ return EFI_SUCCESS;
+}
+
+static
+efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) ||
+ (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) {
+ *load_addr = *load_size = 0;
+ return EFI_SUCCESS;
+ }
+
+ return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
+ soft_limit, hard_limit,
+ load_addr, load_size);
+}
+
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image: EFI loaded image protocol
+ * @load_addr: pointer to store the address of the loaded initrd
+ * @load_size: pointer to store the size of the loaded initrd
+ * @soft_limit: preferred upper address limit for the initrd allocation
+ * @hard_limit: hard upper address limit for the initrd allocation
+ *
+ * Return: status code
+ */
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit)
+{
+ efi_status_t status;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, load_addr, load_size,
+ soft_limit, hard_limit);
+ if (status == EFI_SUCCESS && *load_size > 0)
+ efi_info("Loaded initrd from command line option\n");
+ }
+
+ return status;
+}
+
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec: number of microseconds to wait for key stroke
+ * @key: key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return: status code, EFI_SUCCESS if key received
+ */
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
+{
+ efi_event_t events[2], timer;
+ unsigned long index;
+ efi_simple_text_input_protocol_t *con_in;
+ efi_status_t status;
+
+ con_in = efi_table_attr(efi_system_table, con_in);
+ if (!con_in)
+ return EFI_UNSUPPORTED;
+ efi_set_event_at(events, 0, efi_table_attr(con_in, wait_for_key));
+
+ status = efi_bs_call(create_event, EFI_EVT_TIMER, 0, NULL, NULL, &timer);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(set_timer, timer, EfiTimerRelative,
+ EFI_100NSEC_PER_USEC * usec);
+ if (status != EFI_SUCCESS)
+ return status;
+ efi_set_event_at(events, 1, timer);
+
+ status = efi_bs_call(wait_for_event, 2, events, &index);
+ if (status == EFI_SUCCESS) {
+ if (index == 0)
+ status = efi_call_proto(con_in, read_keystroke, key);
+ else
+ status = EFI_TIMEOUT;
+ }
+
+ efi_bs_call(close_event, timer);
+
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
new file mode 100644
index 000000000..0ab439c53
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EFI stub implementation that is shared by arm and arm64 architectures.
+ * This should be #included by the EFI stub implementation files.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ * Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Mark Salter <msalter@redhat.com>
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * This is the base address at which to start allocating virtual memory ranges
+ * for UEFI Runtime Services.
+ *
+ * For ARM/ARM64:
+ * This is in the low TTBR0 range so that we can use
+ * any allocation we choose, and eliminate the risk of a conflict after kexec.
+ * The value chosen is the largest non-zero power of 2 suitable for this purpose
+ * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
+ * be mapped efficiently.
+ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
+ * map everything below 1 GB. (512 MB is a reasonable upper bound for the
+ * entire footprint of the UEFI runtime services memory regions)
+ *
+ * For RISC-V:
+ * There is no specific reason why this address (512 MB) can't be used as the
+ * EFI runtime virtual address base on RISC-V, and doing so allows the EFI
+ * runtime services to be used on both RV32 and RV64. Keep the same runtime
+ * virtual address for RISC-V as well to minimize code churn.
+ */
+#define EFI_RT_VIRTUAL_BASE SZ_512M
+#define EFI_RT_VIRTUAL_SIZE SZ_512M
+
+#ifdef CONFIG_ARM64
+# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
+#else
+# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
+#endif
+
+static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+static bool flat_va_mapping;
+
+const efi_system_table_t *efi_system_table;
+
+static struct screen_info *setup_graphics(void)
+{
+ efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ struct screen_info *si = NULL;
+
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+ status = efi_setup_gop(si, &gop_proto, size);
+ if (status != EFI_SUCCESS) {
+ free_screen_info(si);
+ return NULL;
+ }
+ }
+ return si;
+}
+
+static void install_memreserve_table(void)
+{
+ struct linux_efi_memreserve *rsv;
+ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+ efi_status_t status;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memreserve entry!\n");
+ return;
+ }
+
+ rsv->next = 0;
+ rsv->size = 0;
+ atomic_set(&rsv->count, 0);
+
+ status = efi_bs_call(install_configuration_table,
+ &memreserve_table_guid, rsv);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to install memreserve config table!\n");
+}
+
+static u32 get_supported_rt_services(void)
+{
+ const efi_rt_properties_table_t *rt_prop_table;
+ u32 supported = EFI_RT_SUPPORTED_ALL;
+
+ rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
+ if (rt_prop_table)
+ supported &= rt_prop_table->runtime_services_supported;
+
+ return supported;
+}
+
+/*
+ * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
+ * that is described in the PE/COFF header. Most of the code is the same
+ * for both architectures, with the arch-specific code provided in the
+ * handle_kernel_image() function.
+ */
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ efi_loaded_image_t *image;
+ efi_status_t status;
+ unsigned long image_addr;
+ unsigned long image_size = 0;
+ /* address and size pairs for memory management */
+ unsigned long initrd_addr = 0;
+ unsigned long initrd_size = 0;
+ unsigned long fdt_addr = 0; /* Original DTB */
+ unsigned long fdt_size = 0;
+ char *cmdline_ptr = NULL;
+ int cmdline_size = 0;
+ efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
+ unsigned long reserve_addr = 0;
+ unsigned long reserve_size = 0;
+ enum efi_secureboot_mode secure_boot;
+ struct screen_info *si;
+ efi_properties_table_t *prop_tbl;
+ unsigned long max_addr;
+
+ efi_system_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ status = EFI_INVALID_PARAMETER;
+ goto fail;
+ }
+
+ status = check_platform_features();
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Get a handle to the loaded image protocol. This is used to get
+ * information about the running image, such as size and the command
+ * line.
+ */
+ status = efi_system_table->boottime->handle_protocol(handle,
+ &loaded_image_proto, (void *)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get loaded image protocol\n");
+ goto fail;
+ }
+
+ /*
+ * Get the command line from EFI, using the LOADED_IMAGE
+ * protocol. We are going to copy the command line into the
+ * device tree, so this can be allocated anywhere.
+ */
+ cmdline_ptr = efi_convert_cmdline(image, &cmdline_size);
+ if (!cmdline_ptr) {
+ efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
+ status = EFI_OUT_OF_RESOURCES;
+ goto fail;
+ }
+
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline_size == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
+ status = efi_parse_options(cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail_free_cmdline;
+ }
+ }
+
+ efi_info("Booting Linux Kernel...\n");
+
+ si = setup_graphics();
+
+ status = handle_kernel_image(&image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ goto fail_free_screeninfo;
+ }
+
+ efi_retrieve_tpm2_eventlog();
+
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation();
+
+ secure_boot = efi_get_secureboot();
+
+ /*
+ * Unauthenticated device tree data is a security hazard, so ignore
+ * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
+ * boot is enabled if we can't determine its state.
+ */
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ secure_boot != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ efi_err("Ignoring DTB from command line.\n");
+ } else {
+ status = efi_load_dtb(image, &fdt_addr, &fdt_size);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to load device tree!\n");
+ goto fail_free_image;
+ }
+ }
+
+ if (fdt_addr) {
+ efi_info("Using DTB from command line\n");
+ } else {
+ /* Look for a device tree configuration table entry. */
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
+ if (fdt_addr)
+ efi_info("Using DTB from configuration table\n");
+ }
+
+ if (!fdt_addr)
+ efi_info("Generating empty DTB\n");
+
+ if (!efi_noinitrd) {
+ max_addr = efi_get_max_initrd_addr(image_addr);
+ status = efi_load_initrd(image, &initrd_addr, &initrd_size,
+ ULONG_MAX, max_addr);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to load initrd!\n");
+ }
+
+ efi_random_get_seed();
+
+ /*
+ * If the NX PE data feature is enabled in the properties table, we
+ * should take care not to create a virtual mapping that changes the
+ * relative placement of runtime services code and data regions, as
+ * they may belong to the same PE/COFF executable image in memory.
+ * The easiest way to achieve that is to simply use a 1:1 mapping.
+ */
+ prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
+ flat_va_mapping = prop_tbl &&
+ (prop_tbl->memory_protection_attribute &
+ EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
+
+ /* force efi_novamap if SetVirtualAddressMap() is unsupported */
+ efi_novamap |= !(get_supported_rt_services() &
+ EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
+
+ /* hibernation expects the runtime regions to stay in the same place */
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
+ /*
+ * Randomize the base of the UEFI runtime services region.
+ * Preserve the 2 MB alignment of the region by taking a
+ * shift of 21 bit positions into account when scaling
+ * the headroom value using a 32-bit random value.
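+ * For example, rnd == 0x80000000 ends up placing the region roughly
+ * halfway through the available headroom, on a 2 MB boundary.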
+ */
+ static const u64 headroom = EFI_RT_VIRTUAL_LIMIT -
+ EFI_RT_VIRTUAL_BASE -
+ EFI_RT_VIRTUAL_SIZE;
+ u32 rnd;
+
+ status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd);
+ if (status == EFI_SUCCESS) {
+ virtmap_base = EFI_RT_VIRTUAL_BASE +
+ (((headroom >> 21) * rnd) >> (32 - 21));
+ }
+ }
+
+ install_memreserve_table();
+
+ status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
+ efi_get_max_fdt_addr(image_addr),
+ initrd_addr, initrd_size,
+ cmdline_ptr, fdt_addr, fdt_size);
+ if (status != EFI_SUCCESS)
+ goto fail_free_initrd;
+
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
+ efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
+ /* not reached */
+
+fail_free_initrd:
+ efi_err("Failed to update FDT and exit boot services\n");
+
+ efi_free(initrd_size, initrd_addr);
+ efi_free(fdt_size, fdt_addr);
+
+fail_free_image:
+ efi_free(image_size, image_addr);
+ efi_free(reserve_size, reserve_addr);
+fail_free_screeninfo:
+ free_screen_info(si);
+fail_free_cmdline:
+ efi_bs_call(free_pool, cmdline_ptr);
+fail:
+ return status;
+}
+
+/*
+ * efi_get_virtmap() - create a virtual mapping for the EFI memory map
+ *
+ * This function populates the virt_addr fields of all memory region descriptors
+ * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
+ * are also copied to @runtime_map, and their total count is returned in @count.
+ */
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count)
+{
+ u64 efi_virt_base = virtmap_base;
+ efi_memory_desc_t *in, *out = runtime_map;
+ int l;
+
+ for (l = 0; l < map_size; l += desc_size) {
+ u64 paddr, size;
+
+ in = (void *)memory_map + l;
+ if (!(in->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
+ in->virt_addr = in->phys_addr;
+ if (efi_novamap) {
+ continue;
+ }
+
+ /*
+ * Make the mapping compatible with 64k pages: this allows
+ * a 4k page size kernel to kexec a 64k page size kernel and
+ * vice versa.
+ */
+ if (!flat_va_mapping) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+
+ in->virt_addr += efi_virt_base - paddr;
+ efi_virt_base += size;
+ }
+
+ memcpy(out, in, desc_size);
+ out = (void *)out + desc_size;
+ ++*count;
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
new file mode 100644
index 000000000..c5e0c6e99
--- /dev/null
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -0,0 +1,853 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
+
+#include <linux/compiler.h>
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/kern_levels.h>
+#include <linux/types.h>
+#include <asm/efi.h>
+
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64), by
+ * renaming the sections, in which case the __init annotation will be
+ * redundant, and will result in section names like .init.init.text, and our
+ * linker script does not expect that.
+ */
+#undef __init
+
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
+extern bool efi_nochunk;
+extern bool efi_nokaslr;
+extern bool efi_noinitrd;
+extern int efi_loglevel;
+extern bool efi_novamap;
+
+extern const efi_system_table_t *efi_system_table;
+
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg);
+
+#ifndef ARCH_HAS_EFISTUB_WRAPPERS
+
+#define efi_is_native() (true)
+#define efi_bs_call(func, ...) efi_system_table->boottime->func(__VA_ARGS__)
+#define efi_rt_call(func, ...) efi_system_table->runtime->func(__VA_ARGS__)
+#define efi_table_attr(inst, attr) (inst->attr)
+#define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__)
+
+#endif
+
+#define efi_info(fmt, ...) \
+ efi_printk(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn(fmt, ...) \
+ efi_printk(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+#define efi_err(fmt, ...) \
+ efi_printk(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug(fmt, ...) \
+ efi_printk(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+
+#define efi_printk_once(fmt, ...) \
+({ \
+ static bool __print_once; \
+ bool __ret_print_once = !__print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ efi_printk(fmt, ##__VA_ARGS__); \
+ } \
+ __ret_print_once; \
+})
+
+#define efi_info_once(fmt, ...) \
+ efi_printk_once(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn_once(fmt, ...) \
+ efi_printk_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+#define efi_err_once(fmt, ...) \
+ efi_printk_once(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug_once(fmt, ...) \
+ efi_printk_once(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+ fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+ fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#define get_efi_var(name, vendor, ...) \
+ efi_rt_call(get_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_rt_call(set_variable, (efi_char16_t *)(name), \
+ (efi_guid_t *)(vendor), __VA_ARGS__)
+
+#define efi_get_handle_at(array, idx) \
+ (efi_is_native() ? (array)[idx] \
+ : (efi_handle_t)(unsigned long)((u32 *)(array))[idx])
+
+#define efi_get_handle_num(size) \
+ ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
+
+#define for_each_efi_handle(handle, array, size, i) \
+ for (i = 0; \
+ i < efi_get_handle_num(size) && \
+ ((handle = efi_get_handle_at((array), i)) || true); \
+ i++)
+
+static inline
+void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
+{
+ *lo = lower_32_bits(data);
+ *hi = upper_32_bits(data);
+}
+
+/*
+ * Allocation types for calls to boottime->allocate_pages.
+ */
+#define EFI_ALLOCATE_ANY_PAGES 0
+#define EFI_ALLOCATE_MAX_ADDRESS 1
+#define EFI_ALLOCATE_ADDRESS 2
+#define EFI_MAX_ALLOCATE_TYPE 3
+
+/*
+ * The type of search to perform when calling boottime->locate_handle
+ */
+#define EFI_LOCATE_ALL_HANDLES 0
+#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
+#define EFI_LOCATE_BY_PROTOCOL 2
+
+/*
+ * boottime->stall takes the time period in microseconds
+ */
+#define EFI_USEC_PER_SEC 1000000
+
+/*
+ * boottime->set_timer takes the time in 100ns units
+ */
+#define EFI_100NSEC_PER_USEC ((u64)10)
+
+/*
+ * An efi_boot_memmap is used by efi_get_memory_map() to return the
+ * EFI memory map in a dynamically allocated buffer.
+ *
+ * The buffer allocated for the EFI memory map includes extra room for
+ * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors.
+ * This facilitates the reuse of the EFI memory map buffer when a second
+ * call to ExitBootServices() is needed because of intervening changes to
+ * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+ * to factor in this headroom requirement as well.
+ */
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
+typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+
+typedef void *efi_event_t;
+/* Note that notifications won't work in mixed mode */
+typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
+
+#define EFI_EVT_TIMER 0x80000000U
+#define EFI_EVT_RUNTIME 0x40000000U
+#define EFI_EVT_NOTIFY_WAIT 0x00000100U
+#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
+
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events: array of UEFI events
+ * @idx: index where to put the event in the array
+ * @event: event to add to the array
+ *
+ * boottime->wait_for_event() takes an array of events as input.
+ * Provide a helper to set it up correctly for mixed mode.
+ */
+static inline
+void efi_set_event_at(efi_event_t *events, size_t idx, efi_event_t event)
+{
+ if (efi_is_native())
+ events[idx] = event;
+ else
+ ((u32 *)events)[idx] = (u32)(unsigned long)event;
+}
+
+#define EFI_TPL_APPLICATION 4
+#define EFI_TPL_CALLBACK 8
+#define EFI_TPL_NOTIFY 16
+#define EFI_TPL_HIGH_LEVEL 31
+
+typedef enum {
+ EfiTimerCancel,
+ EfiTimerPeriodic,
+ EfiTimerRelative
+} EFI_TIMER_DELAY;
+
+/*
+ * EFI Boot Services table
+ */
+union efi_boot_services {
+ struct {
+ efi_table_hdr_t hdr;
+ void *raise_tpl;
+ void *restore_tpl;
+ efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (__efiapi *free_pages)(efi_physical_addr_t,
+ unsigned long);
+ efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *,
+ unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (__efiapi *allocate_pool)(int, unsigned long,
+ void **);
+ efi_status_t (__efiapi *free_pool)(void *);
+ efi_status_t (__efiapi *create_event)(u32, unsigned long,
+ efi_event_notify_t, void *,
+ efi_event_t *);
+ efi_status_t (__efiapi *set_timer)(efi_event_t,
+ EFI_TIMER_DELAY, u64);
+ efi_status_t (__efiapi *wait_for_event)(unsigned long,
+ efi_event_t *,
+ unsigned long *);
+ void *signal_event;
+ efi_status_t (__efiapi *close_event)(efi_event_t);
+ void *check_event;
+ void *install_protocol_interface;
+ void *reinstall_protocol_interface;
+ void *uninstall_protocol_interface;
+ efi_status_t (__efiapi *handle_protocol)(efi_handle_t,
+ efi_guid_t *, void **);
+ void *__reserved;
+ void *register_protocol_notify;
+ efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t *);
+ efi_status_t (__efiapi *locate_device_path)(efi_guid_t *,
+ efi_device_path_protocol_t **,
+ efi_handle_t *);
+ efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
+ void *);
+ void *load_image;
+ void *start_image;
+ efi_status_t __noreturn (__efiapi *exit)(efi_handle_t,
+ efi_status_t,
+ unsigned long,
+ efi_char16_t *);
+ void *unload_image;
+ efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
+ unsigned long);
+ void *get_next_monotonic_count;
+ efi_status_t (__efiapi *stall)(unsigned long);
+ void *set_watchdog_timer;
+ void *connect_controller;
+ efi_status_t (__efiapi *disconnect_controller)(efi_handle_t,
+ efi_handle_t,
+ efi_handle_t);
+ void *open_protocol;
+ void *close_protocol;
+ void *open_protocol_information;
+ void *protocols_per_handle;
+ void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
+ void **);
+ void *install_multiple_protocol_interfaces;
+ void *uninstall_multiple_protocol_interfaces;
+ void *calculate_crc32;
+ void *copy_mem;
+ void *set_mem;
+ void *create_event_ex;
+ };
+ struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+ } mixed_mode;
+};
+
+typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
+
+union efi_uga_draw_protocol {
+ struct {
+ efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *,
+ u32*, u32*, u32*, u32*);
+ void *set_mode;
+ void *blt;
+ };
+ struct {
+ u32 get_mode;
+ u32 set_mode;
+ u32 blt;
+ } mixed_mode;
+};
+
+typedef struct {
+ u16 scan_code;
+ efi_char16_t unicode_char;
+} efi_input_key_t;
+
+union efi_simple_text_input_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *read_keystroke)(efi_simple_text_input_protocol_t *,
+ efi_input_key_t *);
+ efi_event_t wait_for_key;
+ };
+ struct {
+ u32 reset;
+ u32 read_keystroke;
+ u32 wait_for_key;
+ } mixed_mode;
+};
+
+efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key);
+
+union efi_simple_text_output_protocol {
+ struct {
+ void *reset;
+ efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *,
+ efi_char16_t *);
+ void *test_string;
+ };
+ struct {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+ } mixed_mode;
+};
+
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
+#define PIXEL_BIT_MASK 2
+#define PIXEL_BLT_ONLY 3
+#define PIXEL_FORMAT_MAX 4
+
+typedef struct {
+ u32 red_mask;
+ u32 green_mask;
+ u32 blue_mask;
+ u32 reserved_mask;
+} efi_pixel_bitmask_t;
+
+typedef struct {
+ u32 version;
+ u32 horizontal_resolution;
+ u32 vertical_resolution;
+ int pixel_format;
+ efi_pixel_bitmask_t pixel_information;
+ u32 pixels_per_scan_line;
+} efi_graphics_output_mode_info_t;
+
+typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t;
+
+union efi_graphics_output_protocol_mode {
+ struct {
+ u32 max_mode;
+ u32 mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long size_of_info;
+ efi_physical_addr_t frame_buffer_base;
+ unsigned long frame_buffer_size;
+ };
+ struct {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+ } mixed_mode;
+};
+
+typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t;
+
+union efi_graphics_output_protocol {
+ struct {
+ efi_status_t (__efiapi *query_mode)(efi_graphics_output_protocol_t *,
+ u32, unsigned long *,
+ efi_graphics_output_mode_info_t **);
+ efi_status_t (__efiapi *set_mode) (efi_graphics_output_protocol_t *, u32);
+ void *blt;
+ efi_graphics_output_protocol_mode_t *mode;
+ };
+ struct {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+ } mixed_mode;
+};
+
+typedef union {
+ struct {
+ u32 revision;
+ efi_handle_t parent_handle;
+ efi_system_table_t *system_table;
+ efi_handle_t device_handle;
+ void *file_path;
+ void *reserved;
+ u32 load_options_size;
+ void *load_options;
+ void *image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ efi_status_t (__efiapi *unload)(efi_handle_t image_handle);
+ };
+ struct {
+ u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ u32 image_code_type;
+ u32 image_data_type;
+ u32 unload;
+ } mixed_mode;
+} efi_loaded_image_t;
+
+typedef struct {
+ u64 size;
+ u64 file_size;
+ u64 phys_size;
+ efi_time_t create_time;
+ efi_time_t last_access_time;
+ efi_time_t modification_time;
+ __aligned_u64 attribute;
+ efi_char16_t filename[];
+} efi_file_info_t;
+
+typedef struct efi_file_protocol efi_file_protocol_t;
+
+struct efi_file_protocol {
+ u64 revision;
+ efi_status_t (__efiapi *open) (efi_file_protocol_t *,
+ efi_file_protocol_t **,
+ efi_char16_t *, u64, u64);
+ efi_status_t (__efiapi *close) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *delete) (efi_file_protocol_t *);
+ efi_status_t (__efiapi *read) (efi_file_protocol_t *,
+ unsigned long *, void *);
+ efi_status_t (__efiapi *write) (efi_file_protocol_t *,
+ unsigned long, void *);
+ efi_status_t (__efiapi *get_position)(efi_file_protocol_t *, u64 *);
+ efi_status_t (__efiapi *set_position)(efi_file_protocol_t *, u64);
+ efi_status_t (__efiapi *get_info) (efi_file_protocol_t *,
+ efi_guid_t *, unsigned long *,
+ void *);
+ efi_status_t (__efiapi *set_info) (efi_file_protocol_t *,
+ efi_guid_t *, unsigned long,
+ void *);
+ efi_status_t (__efiapi *flush) (efi_file_protocol_t *);
+};
+
+typedef struct efi_simple_file_system_protocol efi_simple_file_system_protocol_t;
+
+struct efi_simple_file_system_protocol {
+ u64 revision;
+ int (__efiapi *open_volume)(efi_simple_file_system_protocol_t *,
+ efi_file_protocol_t **);
+};
+
+#define EFI_FILE_MODE_READ 0x0000000000000001
+#define EFI_FILE_MODE_WRITE 0x0000000000000002
+#define EFI_FILE_MODE_CREATE 0x8000000000000000
+
+typedef enum {
+ EfiPciIoWidthUint8,
+ EfiPciIoWidthUint16,
+ EfiPciIoWidthUint32,
+ EfiPciIoWidthUint64,
+ EfiPciIoWidthFifoUint8,
+ EfiPciIoWidthFifoUint16,
+ EfiPciIoWidthFifoUint32,
+ EfiPciIoWidthFifoUint64,
+ EfiPciIoWidthFillUint8,
+ EfiPciIoWidthFillUint16,
+ EfiPciIoWidthFillUint32,
+ EfiPciIoWidthFillUint64,
+ EfiPciIoWidthMaximum
+} EFI_PCI_IO_PROTOCOL_WIDTH;
+
+typedef enum {
+ EfiPciIoAttributeOperationGet,
+ EfiPciIoAttributeOperationSet,
+ EfiPciIoAttributeOperationEnable,
+ EfiPciIoAttributeOperationDisable,
+ EfiPciIoAttributeOperationSupported,
+ EfiPciIoAttributeOperationMaximum
+} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef union efi_pci_io_protocol efi_pci_io_protocol_t;
+
+typedef
+efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *,
+ EFI_PCI_IO_PROTOCOL_WIDTH,
+ u32 offset,
+ unsigned long count,
+ void *buffer);
+
+typedef struct {
+ void *read;
+ void *write;
+} efi_pci_io_protocol_access_t;
+
+typedef struct {
+ efi_pci_io_protocol_cfg_t read;
+ efi_pci_io_protocol_cfg_t write;
+} efi_pci_io_protocol_config_access_t;
+
+union efi_pci_io_protocol {
+ struct {
+ void *poll_mem;
+ void *poll_io;
+ efi_pci_io_protocol_access_t mem;
+ efi_pci_io_protocol_access_t io;
+ efi_pci_io_protocol_config_access_t pci;
+ void *copy_mem;
+ void *map;
+ void *unmap;
+ void *allocate_buffer;
+ void *free_buffer;
+ void *flush;
+ efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *,
+ unsigned long *segment_nr,
+ unsigned long *bus_nr,
+ unsigned long *device_nr,
+ unsigned long *func_nr);
+ void *attributes;
+ void *get_bar_attributes;
+ void *set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+ };
+ struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ u64 romsize;
+ u32 romimage;
+ } mixed_mode;
+};
+
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
+#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
+#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
+#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
+#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
+#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
+#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+
+struct efi_dev_path;
+
+typedef union apple_properties_protocol apple_properties_protocol_t;
+
+union apple_properties_protocol {
+ struct {
+ unsigned long version;
+ efi_status_t (__efiapi *get)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32 *);
+ efi_status_t (__efiapi *set)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *, void *, u32);
+ efi_status_t (__efiapi *del)(apple_properties_protocol_t *,
+ struct efi_dev_path *,
+ efi_char16_t *);
+ efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *,
+ void *buffer, u32 *);
+ };
+ struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+ } mixed_mode;
+};
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef union efi_tcg2_protocol efi_tcg2_protocol_t;
+
+union efi_tcg2_protocol {
+ struct {
+ void *get_capability;
+ efi_status_t (__efiapi *get_event_log)(efi_handle_t,
+ efi_tcg2_event_log_format,
+ efi_physical_addr_t *,
+ efi_physical_addr_t *,
+ efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+ };
+ struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+ } mixed_mode;
+};
+
+typedef union efi_load_file_protocol efi_load_file_protocol_t;
+typedef union efi_load_file_protocol efi_load_file2_protocol_t;
+
+union efi_load_file_protocol {
+ struct {
+ efi_status_t (__efiapi *load_file)(efi_load_file_protocol_t *,
+ efi_device_path_protocol_t *,
+ bool, unsigned long *, void *);
+ };
+ struct {
+ u32 load_file;
+ } mixed_mode;
+};
+
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ u8 variable_data[];
+ // efi_char16_t description[];
+ // efi_device_path_protocol_t file_path_list[];
+ // u8 optional_data[];
+} __packed efi_load_option_t;
+
+#define EFI_LOAD_OPTION_ACTIVE 0x0001U
+#define EFI_LOAD_OPTION_FORCE_RECONNECT 0x0002U
+#define EFI_LOAD_OPTION_HIDDEN 0x0008U
+#define EFI_LOAD_OPTION_CATEGORY 0x1f00U
+#define EFI_LOAD_OPTION_CATEGORY_BOOT 0x0000U
+#define EFI_LOAD_OPTION_CATEGORY_APP 0x0100U
+
+#define EFI_LOAD_OPTION_BOOT_MASK \
+ (EFI_LOAD_OPTION_ACTIVE|EFI_LOAD_OPTION_HIDDEN|EFI_LOAD_OPTION_CATEGORY)
+#define EFI_LOAD_OPTION_MASK (EFI_LOAD_OPTION_FORCE_RECONNECT|EFI_LOAD_OPTION_BOOT_MASK)
+
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+ size_t optional_data_size;
+ const void *optional_data;
+} efi_load_option_unpacked_t;
+
+void efi_pci_disable_bridge_busmaster(void);
+
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
+
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ unsigned long *new_fdt_addr,
+ unsigned long max_addr,
+ u64 initrd_addr, u64 initrd_size,
+ char *cmdline_ptr,
+ unsigned long fdt_addr,
+ unsigned long fdt_size);
+
+void *get_fdt(unsigned long *fdt_size);
+
+void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ unsigned long desc_size, efi_memory_desc_t *runtime_map,
+ int *count);
+
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed);
+
+efi_status_t efi_random_get_seed(void);
+
+efi_status_t check_platform_features(void);
+
+void *get_efi_config_table(efi_guid_t guid);
+
+/* NOTE: These functions do not print a trailing newline after the string */
+void efi_char16_puts(efi_char16_t *);
+void efi_puts(const char *str);
+
+__printf(1, 2) int efi_printk(char const *fmt, ...);
+
+void efi_free(unsigned long size, unsigned long addr);
+
+void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size);
+
+char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
+
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
+
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max);
+
+efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
+ unsigned long max, unsigned long align);
+
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr);
+
+efi_status_t efi_parse_options(char const *cmdline);
+
+void efi_parse_option_graphics(char *option);
+
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size);
+
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
+
+static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ return handle_cmdline_files(image, L"dtb=", sizeof(L"dtb=") - 2,
+ ULONG_MAX, ULONG_MAX, load_addr, load_size);
+}
+
+efi_status_t efi_load_initrd(efi_loaded_image_t *image,
+ unsigned long *load_addr,
+ unsigned long *load_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit);
+/*
+ * This function handles the architecture specific differences between arm and
+ * arm64 regarding where the kernel image must be loaded and any memory that
+ * must be reserved. On failure it is required to free all
+ * allocations it has made.
+ */
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image);
+
+asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size);
+
+void efi_handle_post_ebs_state(void);
+
+#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
new file mode 100644
index 000000000..d48b0de05
--- /dev/null
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FDT related Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2013 Linaro Limited; author Roy Franz
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define EFI_DT_ADDR_CELLS_DEFAULT 2
+#define EFI_DT_SIZE_CELLS_DEFAULT 2
+
+static void fdt_update_cell_size(void *fdt)
+{
+ int offset;
+
+ offset = fdt_path_offset(fdt, "/");
+ /* Set the #address-cells and #size-cells values for an empty tree */
+
+ fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+}
+
+static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
+ void *fdt, int new_fdt_size, char *cmdline_ptr,
+ u64 initrd_addr, u64 initrd_size)
+{
+ int node, num_rsv;
+ int status;
+ u32 fdt_val32;
+ u64 fdt_val64;
+
+ /* Do some checks on provided FDT, if it exists: */
+ if (orig_fdt) {
+ if (fdt_check_header(orig_fdt)) {
+ efi_err("Device Tree header not valid!\n");
+ return EFI_LOAD_ERROR;
+ }
+ /*
+ * We don't get the size of the FDT if we get it from a
+ * configuration table:
+ */
+ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
+ efi_err("Truncated device tree! foo!\n");
+ return EFI_LOAD_ERROR;
+ }
+ }
+
+ if (orig_fdt) {
+ status = fdt_open_into(orig_fdt, fdt, new_fdt_size);
+ } else {
+ status = fdt_create_empty_tree(fdt, new_fdt_size);
+ if (status == 0) {
+ /*
+ * Any failure from the following function is
+ * non-critical:
+ */
+ fdt_update_cell_size(fdt);
+ }
+ }
+
+ if (status != 0)
+ goto fdt_set_fail;
+
+ /*
+ * Delete all memory reserve map entries. When booting via UEFI,
+ * kernel will use the UEFI memory map to find reserved regions.
+ */
+ num_rsv = fdt_num_mem_rsv(fdt);
+ while (num_rsv-- > 0)
+ fdt_del_mem_rsv(fdt, num_rsv);
+
+ node = fdt_subnode_offset(fdt, 0, "chosen");
+ if (node < 0) {
+ node = fdt_add_subnode(fdt, 0, "chosen");
+ if (node < 0) {
+ /* 'node' is an error code when negative: */
+ status = node;
+ goto fdt_set_fail;
+ }
+ }
+
+ if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) {
+ status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
+ strlen(cmdline_ptr) + 1);
+ if (status)
+ goto fdt_set_fail;
+ }
+
+ /* Set initrd address/end in device tree, if present */
+ if (initrd_size != 0) {
+ u64 initrd_image_end;
+ u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
+
+ status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
+ if (status)
+ goto fdt_set_fail;
+
+ initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
+ status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
+ if (status)
+ goto fdt_set_fail;
+ }
+
+ /* Add FDT entries for EFI runtime services in chosen node. */
+ node = fdt_subnode_offset(fdt, 0, "chosen");
+ fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+
+ fdt_val64 = U64_MAX; /* placeholder */
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+
+ fdt_val32 = U32_MAX; /* placeholder */
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
+ if (status)
+ goto fdt_set_fail;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ efi_status_t efi_status;
+
+ efi_status = efi_get_random_bytes(sizeof(fdt_val64),
+ (u8 *)&fdt_val64);
+ if (efi_status == EFI_SUCCESS) {
+ status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
+ if (status)
+ goto fdt_set_fail;
+ }
+ }
+
+ /* Shrink the FDT back to its minimum size: */
+ fdt_pack(fdt);
+
+ return EFI_SUCCESS;
+
+fdt_set_fail:
+ if (status == -FDT_ERR_NOSPACE)
+ return EFI_BUFFER_TOO_SMALL;
+
+ return EFI_LOAD_ERROR;
+}
+
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+ u64 fdt_val64;
+ u32 fdt_val32;
+ int err;
+
+ if (node < 0)
+ return EFI_LOAD_ERROR;
+
+ fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->map_size);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_size);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ return EFI_SUCCESS;
+}
+
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+ void *new_fdt_addr;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
+ void *priv)
+{
+ struct exit_boot_struct *p = priv;
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+ return update_fdt_memmap(p->new_fdt_addr, map);
+}
+
+#ifndef MAX_FDT_SIZE
+# define MAX_FDT_SIZE SZ_2M
+#endif
+
+/*
+ * Allocate memory for a new FDT, then add EFI, commandline, and
+ * initrd related fields to the FDT. This routine increases the
+ * FDT allocation size until the allocated memory is large
+ * enough. EFI allocations are in EFI_PAGE_SIZE granules,
+ * which are fixed at 4K bytes, so in most cases the first
+ * allocation should succeed.
+ * EFI boot services are exited at the end of this function.
+ * There must be no allocations between the get_memory_map()
+ * call and the exit_boot_services() call, so the exiting of
+ * boot services is very tightly tied to the creation of the FDT
+ * with the final memory map in it.
+ */
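+
+/*
+ * Roughly, the sequence below is: efi_get_memory_map() to snapshot the
+ * memory map, efi_allocate_pages() to make room for the new FDT,
+ * update_fdt() to copy and augment the original tree, then
+ * efi_exit_boot_services() (which patches the final memory map into the
+ * FDT via exit_boot_func()) and, unless efi_novamap is set,
+ * SetVirtualAddressMap() with the runtime entries collected along the way.
+ */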
+
+efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ unsigned long *new_fdt_addr,
+ unsigned long max_addr,
+ u64 initrd_addr, u64 initrd_size,
+ char *cmdline_ptr,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+{
+ unsigned long map_size, desc_size, buff_size;
+ u32 desc_ver;
+ unsigned long mmap_key;
+ efi_memory_desc_t *memory_map, *runtime_map;
+ efi_status_t status;
+ int runtime_entry_count;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
+
+ /*
+ * Get a copy of the current memory map that we will use to prepare
+ * the input for SetVirtualAddressMap(). We don't have to worry about
+ * subsequent allocations adding entries, since they could not affect
+ * the number of EFI_MEMORY_RUNTIME regions.
+ */
+ status = efi_get_memory_map(&map);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+
+ efi_info("Exiting boot services and installing virtual address map...\n");
+
+ map.map = &memory_map;
+ status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for new device tree.\n");
+ goto fail;
+ }
+
+ status = update_fdt((void *)fdt_addr, fdt_size,
+ (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
+ initrd_addr, initrd_size);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to construct new device tree.\n");
+ goto fail_free_new_fdt;
+ }
+
+ runtime_entry_count = 0;
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
+
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+
+ if (efi_novamap)
+ return EFI_SUCCESS;
+
+ /* Install the new virtual address map */
+ svam = efi_system_table->runtime->set_virtual_address_map;
+ status = svam(runtime_entry_count * desc_size, desc_size,
+ desc_ver, runtime_map);
+
+ /*
+ * We are beyond the point of no return here, so if the call to
+ * SetVirtualAddressMap() failed, we need to signal that to the
+ * incoming kernel but proceed normally otherwise.
+ */
+ if (status != EFI_SUCCESS) {
+ int l;
+
+ /*
+ * Set the virtual address field of all
+ * EFI_MEMORY_RUNTIME entries to 0. This will signal
+ * the incoming kernel that no virtual translation has
+ * been installed.
+ */
+ for (l = 0; l < map_size; l += desc_size) {
+ efi_memory_desc_t *p = (void *)memory_map + l;
+
+ if (p->attribute & EFI_MEMORY_RUNTIME)
+ p->virt_addr = 0;
+ }
+ }
+ return EFI_SUCCESS;
+ }
+
+ efi_err("Exit boot services failed.\n");
+
+fail_free_new_fdt:
+ efi_free(MAX_FDT_SIZE, *new_fdt_addr);
+
+fail:
+ efi_system_table->boottime->free_pool(runtime_map);
+
+ return EFI_LOAD_ERROR;
+}
+
+void *get_fdt(unsigned long *fdt_size)
+{
+ void *fdt;
+
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
+
+ if (!fdt)
+ return NULL;
+
+ if (fdt_check_header(fdt) != 0) {
+ efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
+ }
+ *fdt_size = fdt_totalsize(fdt);
+ return fdt;
+}
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
new file mode 100644
index 000000000..dd95f330f
--- /dev/null
+++ b/drivers/firmware/efi/libstub/file.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper functions used by the EFI stub on multiple
+ * architectures. This should be #included by the EFI stub
+ * implementation files.
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#define MAX_FILENAME_SIZE 256
+
+/*
+ * Some firmware implementations have problems reading files in one go.
+ * A read chunk size of 1MB seems to work for most platforms.
+ *
+ * Unfortunately, reading files in chunks triggers *other* bugs on some
+ * platforms, so we provide a way to disable this workaround, which can
+ * be done by passing "efi=nochunk" on the EFI boot stub command line.
+ *
+ * If you experience issues with initrd images being corrupt it's worth
+ * trying efi=nochunk, but chunking is enabled by default on x86 because
+ * there are far more machines that require the workaround than those that
+ * break with it enabled.
+ */
+#define EFI_READ_CHUNK_SIZE SZ_1M
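+
+/*
+ * For example, with chunking in effect a 5 MiB initrd is read by five
+ * consecutive 1 MiB file->read() calls in handle_cmdline_files(), whereas
+ * with efi=nochunk the whole file is requested in a single read.
+ */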
+
+struct finfo {
+ efi_file_info_t info;
+ efi_char16_t filename[MAX_FILENAME_SIZE];
+};
+
+static efi_status_t efi_open_file(efi_file_protocol_t *volume,
+ struct finfo *fi,
+ efi_file_protocol_t **handle,
+ unsigned long *file_size)
+{
+ efi_guid_t info_guid = EFI_FILE_INFO_ID;
+ efi_file_protocol_t *fh;
+ unsigned long info_sz;
+ efi_status_t status;
+
+ status = volume->open(volume, &fh, fi->filename, EFI_FILE_MODE_READ, 0);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to open file: %ls\n", fi->filename);
+ return status;
+ }
+
+ info_sz = sizeof(struct finfo);
+ status = fh->get_info(fh, &info_guid, &info_sz, fi);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get file info\n");
+ fh->close(fh);
+ return status;
+ }
+
+ *handle = fh;
+ *file_size = fi->info.file_size;
+ return EFI_SUCCESS;
+}
+
+static efi_status_t efi_open_volume(efi_loaded_image_t *image,
+ efi_file_protocol_t **fh)
+{
+ efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+ efi_simple_file_system_protocol_t *io;
+ efi_status_t status;
+
+ status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
+ (void **)&io);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to handle fs_proto\n");
+ return status;
+ }
+
+ status = io->open_volume(io, fh);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to open volume\n");
+
+ return status;
+}
+
+static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ const efi_char16_t *prefix, int prefix_size,
+ efi_char16_t *result, int result_len)
+{
+ int prefix_len = prefix_size / 2;
+ bool found = false;
+ int i;
+
+ for (i = prefix_len; i < cmdline_len; i++) {
+ if (!memcmp(&cmdline[i - prefix_len], prefix, prefix_size)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ /* Skip any leading slashes */
+ while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ i++;
+
+ while (--result_len > 0 && i < cmdline_len) {
+ efi_char16_t c = cmdline[i++];
+
+ if (c == L'\0' || c == L'\n' || c == L' ')
+ break;
+ else if (c == L'/')
+ /* Replace UNIX dir separators with EFI standard ones */
+ *result++ = L'\\';
+ else
+ *result++ = c;
+ }
+ *result = L'\0';
+ return i;
+}
+
+/*
+ * Check the cmdline for LILO-style file= arguments.
+ *
+ * We only support loading a file from the same filesystem as
+ * the kernel image.
+ */
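+/*
+ * For example, "initrd=\initramfs.img" (or "initrd=/initramfs.img", since
+ * forward slashes are converted to the EFI '\' separator above) loads an
+ * initrd from the volume the kernel image was loaded from; "dtb=" is
+ * handled the same way via efi_load_dtb().
+ */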
+efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
+ const efi_char16_t *optstr,
+ int optstr_size,
+ unsigned long soft_limit,
+ unsigned long hard_limit,
+ unsigned long *load_addr,
+ unsigned long *load_size)
+{
+ const efi_char16_t *cmdline = image->load_options;
+ int cmdline_len = image->load_options_size;
+ unsigned long efi_chunk_size = ULONG_MAX;
+ efi_file_protocol_t *volume = NULL;
+ efi_file_protocol_t *file;
+ unsigned long alloc_addr;
+ unsigned long alloc_size;
+ efi_status_t status;
+ int offset;
+
+ if (!load_addr || !load_size)
+ return EFI_INVALID_PARAMETER;
+
+ efi_apply_loadoptions_quirk((const void **)&cmdline, &cmdline_len);
+ cmdline_len /= sizeof(*cmdline);
+
+ if (IS_ENABLED(CONFIG_X86) && !efi_nochunk)
+ efi_chunk_size = EFI_READ_CHUNK_SIZE;
+
+ alloc_addr = alloc_size = 0;
+ do {
+ struct finfo fi;
+ unsigned long size;
+ void *addr;
+
+ offset = find_file_option(cmdline, cmdline_len,
+ optstr, optstr_size,
+ fi.filename, ARRAY_SIZE(fi.filename));
+
+ if (!offset)
+ break;
+
+ cmdline += offset;
+ cmdline_len -= offset;
+
+ if (!volume) {
+ status = efi_open_volume(image, &volume);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ status = efi_open_file(volume, &fi, &file, &size);
+ if (status != EFI_SUCCESS)
+ goto err_close_volume;
+
+ /*
+ * Check whether the existing allocation can contain the next
+ * file. This condition will also trigger naturally during the
+ * first (and typically only) iteration of the loop, given that
+ * alloc_size == 0 in that case.
+ */
+ if (round_up(alloc_size + size, EFI_ALLOC_ALIGN) >
+ round_up(alloc_size, EFI_ALLOC_ALIGN)) {
+ unsigned long old_addr = alloc_addr;
+
+ status = EFI_OUT_OF_RESOURCES;
+ if (soft_limit < hard_limit)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ soft_limit);
+ if (status == EFI_OUT_OF_RESOURCES)
+ status = efi_allocate_pages(alloc_size + size,
+ &alloc_addr,
+ hard_limit);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for files\n");
+ goto err_close_file;
+ }
+
+ if (old_addr != 0) {
+ /*
+ * This is not the first time we've gone
+ * around this loop, and so we are loading
+ * multiple files that need to be concatenated
+ * and returned in a single buffer.
+ */
+ memcpy((void *)alloc_addr, (void *)old_addr, alloc_size);
+ efi_free(alloc_size, old_addr);
+ }
+ }
+
+ addr = (void *)alloc_addr + alloc_size;
+ alloc_size += size;
+
+ while (size) {
+ unsigned long chunksize = min(size, efi_chunk_size);
+
+ status = file->read(file, &chunksize, addr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read file\n");
+ goto err_close_file;
+ }
+ addr += chunksize;
+ size -= chunksize;
+ }
+ file->close(file);
+ } while (offset > 0);
+
+ *load_addr = alloc_addr;
+ *load_size = alloc_size;
+
+ if (volume)
+ volume->close(volume);
+ return EFI_SUCCESS;
+
+err_close_file:
+ file->close(file);
+
+err_close_volume:
+ volume->close(volume);
+ efi_free(alloc_size, alloc_addr);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 000000000..ea5da307d
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <linux/string.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+#include "efistub.h"
+
+enum efi_cmdline_option {
+ EFI_CMDLINE_NONE,
+ EFI_CMDLINE_MODE_NUM,
+ EFI_CMDLINE_RES,
+ EFI_CMDLINE_AUTO,
+ EFI_CMDLINE_LIST
+};
+
+static struct {
+ enum efi_cmdline_option option;
+ union {
+ u32 mode;
+ struct {
+ u32 width, height;
+ int format;
+ u8 depth;
+ } res;
+ };
+} cmdline = { .option = EFI_CMDLINE_NONE };
+
+static bool parse_modenum(char *option, char **next)
+{
+ u32 m;
+
+ if (!strstarts(option, "mode="))
+ return false;
+ option += strlen("mode=");
+ m = simple_strtoull(option, &option, 0);
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_MODE_NUM;
+ cmdline.mode = m;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_res(char *option, char **next)
+{
+ u32 w, h, d = 0;
+ int pf = -1;
+
+ if (!isdigit(*option))
+ return false;
+ w = simple_strtoull(option, &option, 10);
+ if (*option++ != 'x' || !isdigit(*option))
+ return false;
+ h = simple_strtoull(option, &option, 10);
+ if (*option == '-') {
+ option++;
+ if (strstarts(option, "rgb")) {
+ option += strlen("rgb");
+ pf = PIXEL_RGB_RESERVED_8BIT_PER_COLOR;
+ } else if (strstarts(option, "bgr")) {
+ option += strlen("bgr");
+ pf = PIXEL_BGR_RESERVED_8BIT_PER_COLOR;
+ } else if (isdigit(*option))
+ d = simple_strtoull(option, &option, 10);
+ else
+ return false;
+ }
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_RES;
+ cmdline.res.width = w;
+ cmdline.res.height = h;
+ cmdline.res.format = pf;
+ cmdline.res.depth = d;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_auto(char *option, char **next)
+{
+ if (!strstarts(option, "auto"))
+ return false;
+ option += strlen("auto");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_AUTO;
+
+ *next = option;
+ return true;
+}
+
+static bool parse_list(char *option, char **next)
+{
+ if (!strstarts(option, "list"))
+ return false;
+ option += strlen("list");
+ if (*option && *option++ != ',')
+ return false;
+ cmdline.option = EFI_CMDLINE_LIST;
+
+ *next = option;
+ return true;
+}
+
+void efi_parse_option_graphics(char *option)
+{
+ while (*option) {
+ if (parse_modenum(option, &option))
+ continue;
+ if (parse_res(option, &option))
+ continue;
+ if (parse_auto(option, &option))
+ continue;
+ if (parse_list(option, &option))
+ continue;
+
+ while (*option && *option++ != ',')
+ ;
+ }
+}
+
+static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ if (cmdline.mode == cur_mode)
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+ if (cmdline.mode >= max_mode) {
+ efi_err("Requested mode is invalid\n");
+ return cur_mode;
+ }
+
+ status = efi_call_proto(gop, query_mode, cmdline.mode,
+ &info_size, &info);
+ if (status != EFI_SUCCESS) {
+ efi_err("Couldn't get mode information\n");
+ return cur_mode;
+ }
+
+ pf = info->pixel_format;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
+ efi_err("Invalid PixelFormat\n");
+ return cur_mode;
+ }
+
+ return cmdline.mode;
+}
+
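+/*
+ * Depth of a PIXEL_BIT_MASK mode is the width of the combined colour masks;
+ * e.g. red 0x00ff0000, green 0x0000ff00, blue 0x000000ff and no reserved
+ * bits give mask 0x00ffffff, so __fls() = 23, __ffs() = 0 and the reported
+ * depth is 24. All other formats report 32 bpp.
+ */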
+static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ u32 mask = pixel_info.red_mask | pixel_info.green_mask |
+ pixel_info.blue_mask | pixel_info.reserved_mask;
+ if (!mask)
+ return 0;
+ return __fls(mask) - __ffs(mask) + 1;
+ } else
+ return 32;
+}
+
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ info = efi_table_attr(mode, info);
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return cur_mode;
+
+ max_mode = efi_table_attr(mode, max_mode);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ if (w == cmdline.res.width && h == cmdline.res.height &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ return m;
+ }
+
+ efi_err("Couldn't find requested mode\n");
+
+ return cur_mode;
+}
+
+static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode, best_mode, area;
+ u8 depth;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h, a;
+ u8 d;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ info = efi_table_attr(mode, info);
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ best_mode = cur_mode;
+ area = w * h;
+ depth = pixel_bpp(pf, pi);
+
+ for (m = 0; m < max_mode; m++) {
+ if (m == cur_mode)
+ continue;
+
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ continue;
+ a = w * h;
+ if (a < area)
+ continue;
+ d = pixel_bpp(pf, pi);
+ if (a > area || d > depth) {
+ best_mode = m;
+ area = a;
+ depth = d;
+ }
+ }
+
+ return best_mode;
+}
+
+static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
+{
+ efi_status_t status;
+
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+ unsigned long info_size;
+
+ u32 max_mode, cur_mode;
+ int pf;
+ efi_pixel_bitmask_t pi;
+ u32 m, w, h;
+ u8 d;
+ const char *dstr;
+ bool valid;
+ efi_input_key_t key;
+
+ mode = efi_table_attr(gop, mode);
+
+ cur_mode = efi_table_attr(mode, mode);
+ max_mode = efi_table_attr(mode, max_mode);
+
+ efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
+ efi_puts(" * = current mode\n"
+ " - = unusable mode\n");
+ for (m = 0; m < max_mode; m++) {
+ status = efi_call_proto(gop, query_mode, m,
+ &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pf = info->pixel_format;
+ pi = info->pixel_information;
+ w = info->horizontal_resolution;
+ h = info->vertical_resolution;
+
+ efi_bs_call(free_pool, info);
+
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
+ d = 0;
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ d = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
+ }
+
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ m,
+ m == cur_mode ? '*' : ' ',
+ !valid ? '-' : ' ',
+ w, h, dstr, d);
+ }
+
+ efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
+ status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
+ if (status != EFI_SUCCESS && status != EFI_TIMEOUT) {
+ efi_err("Unable to read key, continuing in 10 seconds\n");
+ efi_bs_call(stall, 10 * EFI_USEC_PER_SEC);
+ }
+
+ return cur_mode;
+}
+
+static void set_mode(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode;
+ u32 cur_mode, new_mode;
+
+ switch (cmdline.option) {
+ case EFI_CMDLINE_MODE_NUM:
+ new_mode = choose_mode_modenum(gop);
+ break;
+ case EFI_CMDLINE_RES:
+ new_mode = choose_mode_res(gop);
+ break;
+ case EFI_CMDLINE_AUTO:
+ new_mode = choose_mode_auto(gop);
+ break;
+ case EFI_CMDLINE_LIST:
+ new_mode = choose_mode_list(gop);
+ break;
+ default:
+ return;
+ }
+
+ mode = efi_table_attr(gop, mode);
+ cur_mode = efi_table_attr(mode, mode);
+
+ if (new_mode == cur_mode)
+ return;
+
+ if (efi_call_proto(gop, set_mode, new_mode) != EFI_SUCCESS)
+ efi_err("Failed to set requested mode\n");
+}
+
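+/*
+ * Split a contiguous colour mask into bit position and width; e.g. a green
+ * mask of 0x0000ff00 yields *pos = 8 and *size = 8.
+ */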
+static void find_bits(u32 mask, u8 *pos, u8 *size)
+{
+ if (!mask) {
+ *pos = *size = 0;
+ return;
+ }
+
+ /* UEFI spec guarantees that the set bits are contiguous */
+ *pos = __ffs(mask);
+ *size = __fls(mask) - *pos + 1;
+}
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+ efi_pixel_bitmask_t pixel_info, int pixel_format)
+{
+ if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask,
+ &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask,
+ &si->green_pos, &si->green_size);
+ find_bits(pixel_info.blue_mask,
+ &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask,
+ &si->rsvd_pos, &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size +
+ si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ } else {
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->red_pos = 0;
+ si->blue_pos = 16;
+ } else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
+ si->blue_pos = 0;
+ si->red_pos = 16;
+ }
+
+ si->green_pos = 8;
+ si->rsvd_pos = 24;
+ si->red_size = si->green_size =
+ si->blue_size = si->rsvd_size = 8;
+
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ }
+}
+
+static efi_graphics_output_protocol_t *
+find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *first_gop;
+ efi_handle_t h;
+ int i;
+
+ first_gop = NULL;
+
+ for_each_efi_handle(h, handles, size, i) {
+ efi_status_t status;
+
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ void *dummy = NULL;
+
+ status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+ if (info->pixel_format == PIXEL_BLT_ONLY ||
+ info->pixel_format >= PIXEL_FORMAT_MAX)
+ continue;
+
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ *
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ return gop;
+
+ if (!first_gop)
+ first_gop = gop;
+ }
+
+ return first_gop;
+}
+
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size, void **handles)
+{
+ efi_graphics_output_protocol_t *gop;
+ efi_graphics_output_protocol_mode_t *mode;
+ efi_graphics_output_mode_info_t *info;
+
+ gop = find_gop(proto, size, handles);
+
+ /* Did we find any GOPs? */
+ if (!gop)
+ return EFI_NOT_FOUND;
+
+ /* Change mode if requested */
+ set_mode(gop);
+
+ /* EFI framebuffer */
+ mode = efi_table_attr(gop, mode);
+ info = efi_table_attr(mode, info);
+
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
+
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+
+ si->pages = 1;
+
+ setup_pixel_info(si, info->pixels_per_scan_line,
+ info->pixel_information, info->pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+
+ return EFI_SUCCESS;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
+ unsigned long size)
+{
+ efi_status_t status;
+ void **gop_handle = NULL;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&gop_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
+ &size, gop_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ status = setup_gop(si, proto, size, gop_handle);
+
+free_handle:
+ efi_bs_call(free_pool, gop_handle);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
new file mode 100644
index 000000000..feef8d4be
--- /dev/null
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
+/**
+ * efi_get_memory_map() - get memory map
+ * @map: on return pointer to memory map
+ *
+ * Retrieve the UEFI memory map. The allocated memory leaves room for
+ * up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
+{
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
+again:
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ *map->map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ *map->desc_size = 0;
+ key = 0;
+ status = efi_bs_call(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
+ efi_bs_call(free_pool, m);
+ /*
+ * Make sure there are some entries of headroom so that the
+ * buffer can be reused for a new map after allocations are
+ * no longer permitted. It's unlikely that the map will grow to
+ * exceed this headroom once we are ready to trigger
+ * ExitBootServices()
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
+ goto again;
+ }
+
+ if (status == EFI_SUCCESS) {
+ if (map->key_ptr)
+ *map->key_ptr = key;
+ if (map->desc_ver)
+ *map->desc_ver = desc_version;
+ } else {
+ efi_bs_call(free_pool, m);
+ }
+
+fail:
+ *map->map = m;
+ return status;
+}
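+
+/*
+ * Typical usage, as in allocate_new_fdt_and_exit_boot() and
+ * efi_random_alloc(): the caller wires every field of struct efi_boot_memmap
+ * up to local variables, e.g.
+ *
+ *	struct efi_boot_memmap map;
+ *
+ *	map.map       = &memory_map;
+ *	map.map_size  = &map_size;
+ *	map.desc_size = &desc_size;
+ *	map.desc_ver  = &desc_ver;	(may be NULL)
+ *	map.key_ptr   = &mmap_key;	(may be NULL)
+ *	map.buff_size = &buff_size;
+ *	status = efi_get_memory_map(&map);
+ *
+ * and releases *map.map with the free_pool boot service once it is done
+ * with it.
+ */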
+
+/**
+ * efi_allocate_pages() - Allocate memory pages
+ * @size: minimum number of bytes to allocate
+ * @addr: On return the address of the first allocated page. The first
+ * allocated page has alignment EFI_ALLOC_ALIGN which is an
+ * architecture dependent multiple of the page size.
+ * @max: the address that the last allocated memory page shall not
+ * exceed
+ *
+ * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
+ * to EFI_ALLOC_ALIGN. The last allocated page will not exceed the address
+ * given by @max.
+ *
+ * Return: status code
+ */
+efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
+ unsigned long max)
+{
+ efi_physical_addr_t alloc_addr;
+ efi_status_t status;
+
+ if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
+ return efi_allocate_pages_aligned(size, addr, max,
+ EFI_ALLOC_ALIGN);
+
+ alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_LOADER_DATA, DIV_ROUND_UP(size, EFI_PAGE_SIZE),
+ &alloc_addr);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *addr = alloc_addr;
+ return EFI_SUCCESS;
+}
+
+/**
+ * efi_free() - free memory pages
+ * @size: size of the memory area to free in bytes
+ * @addr: start of the memory area to free (must be EFI_PAGE_SIZE
+ * aligned)
+ *
+ * @size is rounded up to a multiple of EFI_ALLOC_ALIGN which is an
+ * architecture specific multiple of EFI_PAGE_SIZE. So this function should
+ * only be used to return pages allocated with efi_allocate_pages() or
+ * efi_low_alloc_above().
+ */
+void efi_free(unsigned long size, unsigned long addr)
+{
+ unsigned long nr_pages;
+
+ if (!size)
+ return;
+
+ nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ efi_bs_call(free_pages, addr, nr_pages);
+}
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
new file mode 100644
index 000000000..99fb25d2b
--- /dev/null
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI-related functions used by the EFI stub on multiple
+ * architectures.
+ *
+ * Copyright 2019 Google, LLC
+ */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+void efi_pci_disable_bridge_busmaster(void)
+{
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long pci_handle_size = 0;
+ efi_handle_t *pci_handle = NULL;
+ efi_handle_t handle;
+ efi_status_t status;
+ u16 class, command;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL) {
+ if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
+ efi_err("Failed to locate PCI I/O handles'\n");
+ return;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
+ (void **)&pci_handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+ NULL, &pci_handle_size, pci_handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to locate PCI I/O handles'\n");
+ goto free_handle;
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+ unsigned long segment_nr, bus_nr, device_nr, func_nr;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ /*
+ * Disregard devices living on bus 0 - these are not behind a
+ * bridge so no point in disconnecting them from their drivers.
+ */
+ status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr,
+ &device_nr, &func_nr);
+ if (status != EFI_SUCCESS || bus_nr == 0)
+ continue;
+
+ /*
+ * Don't disconnect VGA controllers so we don't risk losing
+ * access to the framebuffer. Drivers for true PCIe graphics
+ * controllers that are behind a PCIe root port do not use
+ * DMA to implement the GOP framebuffer anyway [although they
+ * may use it in their implementation of Gop->Blt()], and so
+ * disabling DMA in the PCI bridge should not interfere with
+ * normal operation of the device.
+ */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+ if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA)
+ continue;
+
+ /* Disconnect this handle from all its drivers */
+ efi_bs_call(disconnect_controller, handle, NULL, NULL);
+ }
+
+ for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ efi_pci_io_protocol_t *pci;
+
+ status = efi_bs_call(handle_protocol, handle, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_CLASS_DEVICE, 1, &class);
+
+ if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ /* Disable busmastering */
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER))
+ continue;
+
+ command &= ~PCI_COMMAND_MASTER;
+ status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16,
+ PCI_COMMAND, 1, &command);
+ if (status != EFI_SUCCESS)
+ efi_err("Failed to disable PCI busmastering\n");
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 000000000..f85d2c066
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+typedef union efi_rng_protocol efi_rng_protocol_t;
+
+union efi_rng_protocol {
+ struct {
+ efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *,
+ unsigned long *,
+ efi_guid_t *);
+ efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *,
+ efi_guid_t *, unsigned long,
+ u8 *out);
+ };
+ struct {
+ u32 get_info;
+ u32 get_rng;
+ } mixed_mode;
+};
+
+/**
+ * efi_get_random_bytes() - fill a buffer with random bytes
+ * @size: size of the buffer
+ * @out: caller allocated buffer to receive the random bytes
+ *
+ * The call will fail if either the firmware does not implement the
+ * EFI_RNG_PROTOCOL or there are not enough random bytes available to fill
+ * the buffer.
+ *
+ * Return: status code
+ */
+efi_status_t efi_get_random_bytes(unsigned long size, u8 *out)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_status_t status;
+ efi_rng_protocol_t *rng = NULL;
+
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return efi_call_proto(rng, get_rng, NULL, size, out);
+}
+
+/**
+ * efi_random_get_seed() - provide random seed as configuration table
+ *
+ * The EFI_RNG_PROTOCOL is used to read random bytes. These random bytes are
+ * saved as a configuration table which can be used as entropy by the kernel
+ * for the initialization of its pseudo random number generator.
+ *
+ * If the EFI_RNG_PROTOCOL is not available or there are not enough random bytes
+ * available, the configuration table will not be installed and an error code
+ * will be returned.
+ *
+ * Return: status code
+ */
+efi_status_t efi_random_get_seed(void)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+ struct linux_efi_random_seed *prev_seed, *seed = NULL;
+ int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE;
+ efi_rng_protocol_t *rng = NULL;
+ efi_status_t status;
+
+ status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /*
+ * Check whether a seed was provided by a prior boot stage. In that
+ * case, instead of overwriting it, let's create a new buffer that can
+ * hold both, and concatenate the existing and the new seeds.
+ * Note that we should read the seed size with caution, in case the
+ * table got corrupted in memory somehow.
+ */
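+ /*
+ * On success the new table holds the fresh EFI_RANDOM_SEED_SIZE bytes in
+ * seed->bits[0..], followed by the previous seed's bytes, with seed->size
+ * covering both.
+ */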
+ prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID);
+ if (prev_seed && prev_seed->size <= 512U) {
+ prev_seed_size = prev_seed->size;
+ seed_size += prev_seed_size;
+ }
+
+ /*
+ * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the
+ * allocation will survive a kexec reboot (although we refresh the seed
+ * beforehand)
+ */
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
+ struct_size(seed, bits, seed_size),
+ (void **)&seed);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to allocate memory for RNG seed.\n");
+ goto err_warn;
+ }
+
+ status = efi_call_proto(rng, get_rng, &rng_algo_raw,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
+ if (status == EFI_UNSUPPORTED)
+ /*
+ * Use whatever algorithm we have available if the raw algorithm
+ * is not implemented.
+ */
+ status = efi_call_proto(rng, get_rng, NULL,
+ EFI_RANDOM_SEED_SIZE, seed->bits);
+
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ seed->size = seed_size;
+ if (prev_seed_size)
+ memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits,
+ prev_seed_size);
+
+ status = efi_bs_call(install_configuration_table, &rng_table_guid, seed);
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ if (prev_seed_size) {
+ /* wipe and free the old seed if we managed to install the new one */
+ memzero_explicit(prev_seed->bits, prev_seed_size);
+ efi_bs_call(free_pool, prev_seed);
+ }
+ return EFI_SUCCESS;
+
+err_freepool:
+ memzero_explicit(seed, struct_size(seed, bits, seed_size));
+ efi_bs_call(free_pool, seed);
+ efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n");
+err_warn:
+ if (prev_seed)
+ efi_warn("Retaining bootloader-supplied seed only");
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
new file mode 100644
index 000000000..724155b9e
--- /dev/null
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/efi.h>
+#include <linux/log2.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
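+/*
+ * For example, a 16 MiB EFI_CONVENTIONAL_MEMORY region starting at
+ * 0x80000000 offers 8 slots for a 2 MiB allocation aligned to 2 MiB
+ * (align_shift = 21): first_slot = 0x80000000, last_slot = 0x80e00000,
+ * and ((last_slot - first_slot) >> 21) + 1 = 8.
+ */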
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align_shift)
+{
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
+ region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
+ (u64)ULONG_MAX);
+ if (region_end < size)
+ return 0;
+
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
+ return 0;
+
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed)
+{
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long buff_size;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
+ struct efi_boot_memmap map;
+
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&map);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, ilog2(align));
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ }
+
+ /* find a random number between 0 and total_slots */
+ target_slot = (total_slots * (u64)(random_seed & U32_MAX)) >> 32;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = size / EFI_PAGE_SIZE;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_bs_call(free_pool, memory_map);
+
+ return status;
+}
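The slot choice above avoids a modulo: multiplying total_slots by the low 32 bits of the seed and shifting right by 32 maps the seed roughly uniformly onto [0, total_slots). A stand-alone sketch of that fixed-point step, with a made-up slot count:

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit random value onto [0, nslots), as efi_random_alloc() does. */
static unsigned long pick_slot(unsigned long nslots, uint32_t seed)
{
	return ((uint64_t)nslots * seed) >> 32;
}

int main(void)
{
	unsigned long total_slots = 12345;	/* made-up slot count */
	uint32_t seeds[] = { 0x0, 0x80000000u, 0xffffffffu };

	for (int i = 0; i < 3; i++)
		printf("seed %#010x -> slot %lu of %lu\n",
		       seeds[i], pick_slot(total_slots, seeds[i]), total_slots);
	return 0;
}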
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
new file mode 100644
index 000000000..8ee9eb2b9
--- /dev/null
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/**
+ * efi_low_alloc_above() - allocate pages at or above given address
+ * @size: size of the memory area to allocate
+ * @align: minimum alignment of the allocated memory area. It should
+ * be a power of two.
+ * @addr: on exit the address of the allocated memory
+ * @min: minimum address to be used for the memory allocation
+ *
+ * Allocate at the lowest possible address that is not below @min as
+ * EFI_LOADER_DATA. The allocated pages are aligned according to @align but at
+ * least EFI_ALLOC_ALIGN. The first allocated page will not be below the address
+ * given by @min.
+ *
+ * Return: status code
+ */
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
+{
+ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(&boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ /*
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
+ */
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
+ for (i = 0; i < map_size / desc_size; i++) {
+ efi_memory_desc_t *desc;
+ unsigned long m = (unsigned long)map;
+ u64 start, end;
+
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
+
+ if (desc->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
+ if (desc->num_pages < nr_pages)
+ continue;
+
+ start = desc->phys_addr;
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
+
+ if (start < min)
+ start = min;
+
+ start = round_up(start, align);
+ if ((start + size) > end)
+ continue;
+
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &start);
+ if (status == EFI_SUCCESS) {
+ *addr = start;
+ break;
+ }
+ }
+
+ if (i == map_size / desc_size)
+ status = EFI_NOT_FOUND;
+
+ efi_bs_call(free_pool, map);
+fail:
+ return status;
+}
+
+/**
+ * efi_relocate_kernel() - copy memory area
+ * @image_addr: pointer to address of memory area to copy
+ * @image_size: size of memory area to copy
+ * @alloc_size: minimum size of memory to allocate, must be greater or
+ * equal to image_size
+ * @preferred_addr: preferred target address
+ * @alignment: minimum alignment of the allocated memory area. It
+ * should be a power of two.
+ * @min_addr: minimum target address
+ *
+ * Copy a memory area to a newly allocated memory area aligned according
+ * to @alignment but at least EFI_ALLOC_ALIGN. If the preferred address
+ * is not available, the allocated address will not be below @min_addr.
+ * On exit, @image_addr is updated to the target copy address that was used.
+ *
+ * This function is used to copy the Linux kernel verbatim. It does not apply
+ * any relocation changes.
+ *
+ * Return: status code
+ */
+efi_status_t efi_relocate_kernel(unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment,
+ unsigned long min_addr)
+{
+ unsigned long cur_image_addr;
+ unsigned long new_addr = 0;
+ efi_status_t status;
+ unsigned long nr_pages;
+ efi_physical_addr_t efi_addr = preferred_addr;
+
+ if (!image_addr || !image_size || !alloc_size)
+ return EFI_INVALID_PARAMETER;
+ if (alloc_size < image_size)
+ return EFI_INVALID_PARAMETER;
+
+ cur_image_addr = *image_addr;
+
+ /*
+ * The EFI firmware loader could have placed the kernel image
+ * anywhere in memory, but the kernel has restrictions on the
+ * max physical address it can run at. Some architectures
+ * also have a preferred address, so first try to relocate
+ * to the preferred address. If that fails, allocate as low
+ * as possible while respecting the required alignment.
+ */
+ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages, &efi_addr);
+ new_addr = efi_addr;
+ /*
+ * If preferred address allocation failed allocate as low as
+ * possible.
+ */
+ if (status != EFI_SUCCESS) {
+ status = efi_low_alloc_above(alloc_size, alignment, &new_addr,
+ min_addr);
+ }
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate usable memory for kernel.\n");
+ return status;
+ }
+
+ /*
+ * We know source/dest won't overlap since both memory ranges
+ * have been allocated by UEFI, so we can safely use memcpy.
+ */
+ memcpy((void *)new_addr, (void *)cur_image_addr, image_size);
+
+ /* Return the new address of the relocated image. */
+ *image_addr = new_addr;
+
+ return status;
+}
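efi_low_alloc_above() is essentially a low-fit scan: clamp each free region to @min, align the start, and allocate in the first region that still has room (the EFI memory map is sorted, so first fit is lowest fit). The sketch below mimics that arithmetic over an array of made-up regions and keeps the lowest candidate explicitly; the region list, sizes and the ROUND_UP helper are illustrative, not the EFI types.

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t start, end; };	/* made-up stand-in for an EFI descriptor */

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Lowest aligned address >= min with room for size bytes, or 0 if none fits. */
static uint64_t low_alloc_above(const struct region *map, int n,
				uint64_t size, uint64_t align, uint64_t min)
{
	uint64_t best = 0;

	for (int i = 0; i < n; i++) {
		uint64_t start = map[i].start < min ? min : map[i].start;

		start = ROUND_UP(start, align);
		if (start + size > map[i].end)
			continue;
		if (!best || start < best)
			best = start;
	}
	return best;
}

int main(void)
{
	struct region map[] = {
		{ 0x00090000, 0x000a0000 },	/* too small once clamped to min */
		{ 0x00100000, 0x04000000 },
		{ 0x80000000, 0x90000000 },
	};

	printf("placed at %#llx\n", (unsigned long long)
	       low_alloc_above(map, 3, 0x1000000, 0x200000, 0x200000));
	return 0;
}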
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
new file mode 100644
index 000000000..9c4608434
--- /dev/null
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+
+#include <asm/efi.h>
+#include <asm/sections.h>
+
+#include "efistub.h"
+
+/*
+ * RISC-V requires the kernel image to be placed at a 2 MB aligned base for
+ * 64-bit and at a 4 MB aligned base for 32-bit.
+ */
+#ifdef CONFIG_64BIT
+#define MIN_KIMG_ALIGN SZ_2M
+#else
+#define MIN_KIMG_ALIGN SZ_4M
+#endif
+
+typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
+
+static u32 hartid;
+
+static int get_boot_hartid_from_fdt(void)
+{
+ const void *fdt;
+ int chosen_node, len;
+ const fdt32_t *prop;
+
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
+ if (!fdt)
+ return -EINVAL;
+
+ chosen_node = fdt_path_offset(fdt, "/chosen");
+ if (chosen_node < 0)
+ return -EINVAL;
+
+ prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
+ if (!prop || len != sizeof(u32))
+ return -EINVAL;
+
+ hartid = fdt32_to_cpu(*prop);
+ return 0;
+}
+
+efi_status_t check_platform_features(void)
+{
+ int ret;
+
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
+ efi_err("/chosen/boot-hartid missing or invalid!\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint, unsigned long fdt,
+ unsigned long fdt_size)
+{
+ unsigned long stext_offset = _start_kernel - _start;
+ unsigned long kernel_entry = entrypoint + stext_offset;
+ jump_kernel_func jump_kernel = (jump_kernel_func)kernel_entry;
+
+ /*
+	 * Jump to the real kernel here with the following constraints:
+	 * 1. The MMU should be disabled.
+	 * 2. a0 should contain the hartid.
+	 * 3. a1 should contain the DT address.
+ */
+ csr_write(CSR_SATP, 0);
+ jump_kernel(hartid, fdt);
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image)
+{
+ unsigned long kernel_size = 0;
+ unsigned long preferred_addr;
+ efi_status_t status;
+
+ kernel_size = _edata - _start;
+ *image_addr = (unsigned long)_start;
+ *image_size = kernel_size + (_end - _edata);
+
+ /*
+	 * The RISC-V kernel maps the PAGE_OFFSET virtual address to the same
+	 * physical address where the kernel is booted, so the kernel should
+	 * boot from as low an address as possible to avoid wasting memory.
+	 * Currently, dram_base is occupied by the firmware, so the preferred
+	 * boot address is the next aligned address. If the preferred address
+	 * is not available, efi_relocate_kernel() falls back to
+	 * efi_low_alloc_above() to allocate the lowest possible memory region
+	 * that satisfies the address and size alignment constraints.
+ */
+ preferred_addr = MIN_KIMG_ALIGN;
+ status = efi_relocate_kernel(image_addr, kernel_size, *image_size,
+ preferred_addr, MIN_KIMG_ALIGN, 0x0);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *image_size = 0;
+ }
+ return status;
+}
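The hartid lookup above walks the device tree passed as an EFI configuration table. As a rough illustration, the same property can be read in user space with libfdt (assumed available); the helper below is hypothetical and expects the caller to supply a mapped FDT blob.

#include <libfdt.h>	/* assumed available from the dtc/libfdt package */

/* Hypothetical helper: returns 0 and fills *hartid on success. */
int read_boot_hartid(const void *fdt, unsigned int *hartid)
{
	const fdt32_t *prop;
	int node, len;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return -1;

	prop = fdt_getprop(fdt, node, "boot-hartid", &len);
	if (!prop || len != (int)sizeof(fdt32_t))
		return -1;

	*hartid = fdt32_to_cpu(*prop);
	return 0;
}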
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
new file mode 100644
index 000000000..a2be3a71b
--- /dev/null
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Secure boot handling.
+ *
+ * Copyright (C) 2013,2014 Linaro Limited
+ * Roy Franz <roy.franz@linaro.org>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Mark Salter <msalter@redhat.com>
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+/* BIOS variables */
+static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+static const efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
+static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
+
+/* SHIM variables */
+static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
+
+/*
+ * Determine whether we're in secure boot mode.
+ *
+ * Please keep the logic in sync with
+ * arch/x86/xen/efi.c:xen_efi_get_secureboot().
+ */
+enum efi_secureboot_mode efi_get_secureboot(void)
+{
+ u32 attr;
+ u8 secboot, setupmode, moksbstate;
+ unsigned long size;
+ efi_status_t status;
+
+ size = sizeof(secboot);
+ status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
+ NULL, &size, &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ size = sizeof(setupmode);
+ status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
+ NULL, &size, &setupmode);
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+
+ /*
+ * See if a user has put the shim into insecure mode. If so, and if the
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
+ */
+ size = sizeof(moksbstate);
+ status = get_efi_var(shim_MokSBState_name, &shim_guid,
+ &attr, &size, &moksbstate);
+
+ /* If it fails, we don't care why. Default to secure */
+ if (status != EFI_SUCCESS)
+ goto secure_boot_enabled;
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
+ return efi_secureboot_mode_disabled;
+
+secure_boot_enabled:
+ efi_info("UEFI Secure Boot is enabled.\n");
+ return efi_secureboot_mode_enabled;
+
+out_efi_err:
+ efi_err("Could not determine UEFI Secure Boot status.\n");
+ return efi_secureboot_mode_unknown;
+}
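The decision above depends on three variables: SecureBoot, SetupMode and, optionally, the shim's MokSBStateRT. The sketch below condenses the same precedence into a pure function with the variable reads replaced by parameters; the enum names are invented and the error paths that yield the "unknown" mode are omitted.

#include <stdio.h>

enum sb_mode { SB_DISABLED, SB_ENABLED };

/*
 * Same precedence as efi_get_secureboot(): SecureBoot==0 or SetupMode==1
 * disables; a volatile MokSBStateRT==1 disables; otherwise enabled.
 * moksb_volatile mirrors "attribute lacks EFI_VARIABLE_NON_VOLATILE".
 */
static enum sb_mode secureboot_mode(int secboot, int setupmode,
				    int have_moksb, int moksb_volatile, int moksbstate)
{
	if (secboot == 0 || setupmode == 1)
		return SB_DISABLED;
	if (have_moksb && moksb_volatile && moksbstate == 1)
		return SB_DISABLED;
	return SB_ENABLED;
}

int main(void)
{
	printf("%d %d %d\n",
	       secureboot_mode(1, 0, 0, 0, 0),	/* enabled */
	       secureboot_mode(0, 0, 0, 0, 0),	/* disabled: SecureBoot off */
	       secureboot_mode(1, 0, 1, 1, 1));	/* disabled: shim insecure */
	return 0;
}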
diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c
new file mode 100644
index 000000000..159fb4e45
--- /dev/null
+++ b/drivers/firmware/efi/libstub/skip_spaces.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
new file mode 100644
index 000000000..5d13e4386
--- /dev/null
+++ b/drivers/firmware/efi/libstub/string.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Taken from:
+ * linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+ size_t l1, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ l1 = strlen(s1);
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+#endif
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+static unsigned int simple_guess_base(const char *cp)
+{
+ if (cp[0] == '0') {
+ if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+ return 16;
+ else
+ return 8;
+ } else {
+ return 10;
+ }
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long long result = 0;
+
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoull(cp + 1, endp, base);
+
+ return simple_strtoull(cp, endp, base);
+}
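simple_strtoull() guesses the base from a leading "0x" or "0" when base is 0 and reports where parsing stopped through endp. For the inputs below, the standard C strtoull() behaves the same way, so it can stand in for a quick check (the stub's own copy is not callable from user space):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Base 0 guesses the base: leading "0x" -> 16, leading "0" -> 8, else 10. */
	const char *inputs[] = { "0x1f load_addr", "0755", "42abc" };
	char *end;

	for (int i = 0; i < 3; i++) {
		unsigned long long v = strtoull(inputs[i], &end, 0);

		printf("\"%s\" -> %llu, stopped at \"%s\"\n", inputs[i], v, end);
	}
	return 0;
}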
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
new file mode 100644
index 000000000..7acbac16e
--- /dev/null
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPM handling.
+ *
+ * Copyright (C) 2016 CoreOS, Inc
+ * Copyright (C) 2017 Google, Inc.
+ * Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+static const efi_char16_t efi_MemoryOverWriteRequest_name[] =
+ L"MemoryOverwriteRequestControl";
+
+#define MEMORY_ONLY_RESET_CONTROL_GUID \
+ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
+
+/*
+ * Enable reboot attack mitigation. This requests that the firmware clear the
+ * RAM on next reboot before proceeding with boot, ensuring that any secrets
+ * are cleared. If userland has ensured that all secrets have been removed
+ * from RAM before reboot, it can simply reset this variable.
+ */
+void efi_enable_reset_attack_mitigation(void)
+{
+ u8 val = 1;
+ efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
+ efi_status_t status;
+ unsigned long datasize = 0;
+
+ status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ NULL, &datasize, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ return;
+
+ set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
+}
+
+#endif
+
+void efi_retrieve_tpm2_eventlog(void)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
+ struct linux_efi_tpm_eventlog *log_tbl = NULL;
+ struct efi_tcg2_final_events_table *final_events_table = NULL;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ efi_tcg2_protocol_t *tcg2_protocol = NULL;
+ int final_events_size = 0;
+
+ status = efi_bs_call(locate_protocol, &tcg2_guid, NULL,
+ (void **)&tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry, &truncated);
+
+ if (status != EFI_SUCCESS || !log_location) {
+ version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ status = efi_call_proto(tcg2_protocol, get_event_log, version,
+ &log_location, &log_last_entry,
+ &truncated);
+ if (status != EFI_SUCCESS || !log_location)
+ return;
+
+ }
+
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ /*
+ * The TCG2 log format has variable length entries,
+ * and the information to decode the hash algorithms
+ * back into a size is contained in the first entry -
+ * pass a pointer to the final entry (to calculate its
+ * size) and the first entry (so we know how long each
+ * digest is)
+ */
+ last_entry_size =
+ __calc_tpm2_event_size((void *)last_entry_addr,
+ (void *)(long)log_location,
+ false);
+ } else {
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ }
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size, (void **)&log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to allocate memory for event log\n");
+ return;
+ }
+
+ /*
+ * Figure out whether any events have already been logged to the
+ * final events structure, and if so how much space they take up
+ */
+ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (final_events_table && final_events_table->nr_events) {
+ struct tcg_pcr_event2_head *header;
+ int offset;
+ void *data;
+ int event_size;
+ int i = final_events_table->nr_events;
+
+ data = (void *)final_events_table;
+ offset = sizeof(final_events_table->version) +
+ sizeof(final_events_table->nr_events);
+
+ while (i > 0) {
+ header = data + offset + final_events_size;
+ event_size = __calc_tpm2_event_size(header,
+ (void *)(long)log_location,
+ false);
+ final_events_size += event_size;
+ i--;
+ }
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->final_events_preboot_size = final_events_size;
+ log_tbl->version = version;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_bs_call(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_bs_call(free_pool, log_tbl);
+}
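For the 1.2 log format, the size of the final entry is the fixed tcpa_event header plus its event_size, so the total log size is last_entry - first_entry + that amount. A toy version of the arithmetic, using a trimmed-down header layout and made-up offsets:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Trimmed-down, hypothetical layout; only event_size matters here. */
struct tcpa_event_hdr {
	uint32_t pcr_index;
	uint32_t event_type;
	uint8_t  pcr_value[20];
	uint32_t event_size;
};

int main(void)
{
	uint8_t log[256] = { 0 };
	size_t last_off = 100;			/* pretend offset of the last entry */
	struct tcpa_event_hdr hdr = { .event_size = 16 };

	memcpy(log + last_off, &hdr, sizeof(hdr));

	/* log_size = (last - first) + header + payload, as in the 1.2 branch above */
	size_t last_size = sizeof(hdr) +
			   ((struct tcpa_event_hdr *)(log + last_off))->event_size;

	printf("log size = %zu bytes\n", last_off + last_size);
	return 0;
}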
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
new file mode 100644
index 000000000..1088e288c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright 2007 rPath, Inc. - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * Oh, it's a waste of space, but oh-so-yummy for debugging.
+ */
+
+#include <stdarg.h>
+
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static
+int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ while (isdigit(**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+/*
+ * put_dec_full4 handles numbers in the range 0 <= r < 10000.
+ * The multiplier 0xccd is round(2^15/10), and the approximation
+ * r/10 == (r * 0xccd) >> 15 is exact for all r < 16389.
+ */
+static
+void put_dec_full4(char *end, unsigned int r)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ unsigned int q = (r * 0xccd) >> 15;
+ *--end = '0' + (r - q * 10);
+ r = q;
+ }
+ *--end = '0' + r;
+}
+
+/* put_dec is copied from lib/vsprintf.c with small modifications */
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (second call in the put_dec code, assuming n is all-ones).
+ */
+static
+unsigned int put_dec_helper4(char *end, unsigned int x)
+{
+ unsigned int q = (x * 0x346DC5D7ULL) >> 43;
+
+ put_dec_full4(end, x - q * 10000);
+ return q;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *end, unsigned long long n)
+{
+ unsigned int d3, d2, d1, q, h;
+ char *p = end;
+
+ d1 = ((unsigned int)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((unsigned int)n & 0xffff);
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ q += 281 * d3;
+ q = put_dec_helper4(p, q);
+ p -= 4;
+
+ put_dec_full4(p, q);
+ p -= 4;
+
+ /* strip off the extra 0's we printed */
+ while (p < end && *p == '0')
+ ++p;
+
+ return p;
+}
+
+static
+char *number(char *end, unsigned long long num, int base, char locase)
+{
+ /*
+ * locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces same digits or (maybe lowercased) letters
+ */
+
+ /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+ switch (base) {
+ case 10:
+ if (num != 0)
+ end = put_dec(end, num);
+ break;
+ case 8:
+ for (; num != 0; num >>= 3)
+ *--end = '0' + (num & 07);
+ break;
+ case 16:
+ for (; num != 0; num >>= 4)
+ *--end = digits[num & 0xf] | locase;
+ break;
+ default:
+ unreachable();
+ }
+
+ return end;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SMALL 32 /* Must be 32 == 0x20 */
+#define SPECIAL 64 /* 0x */
+#define WIDE 128 /* UTF-16 string */
+
+static
+int get_flags(const char **fmt)
+{
+ int flags = 0;
+
+ do {
+ switch (**fmt) {
+ case '-':
+ flags |= LEFT;
+ break;
+ case '+':
+ flags |= PLUS;
+ break;
+ case ' ':
+ flags |= SPACE;
+ break;
+ case '#':
+ flags |= SPECIAL;
+ break;
+ case '0':
+ flags |= ZEROPAD;
+ break;
+ default:
+ return flags;
+ }
+ ++(*fmt);
+ } while (1);
+}
+
+static
+int get_int(const char **fmt, va_list *ap)
+{
+ if (isdigit(**fmt))
+ return skip_atoi(fmt);
+ if (**fmt == '*') {
+ ++(*fmt);
+ /* it's the next argument */
+ return va_arg(*ap, int);
+ }
+ return 0;
+}
+
+static
+unsigned long long get_number(int sign, int qualifier, va_list *ap)
+{
+ if (sign) {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, long long);
+ case 'l':
+ return va_arg(*ap, long);
+ case 'h':
+ return (short)va_arg(*ap, int);
+ case 'H':
+ return (signed char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, int);
+ };
+ } else {
+ switch (qualifier) {
+ case 'L':
+ return va_arg(*ap, unsigned long long);
+ case 'l':
+ return va_arg(*ap, unsigned long);
+ case 'h':
+ return (unsigned short)va_arg(*ap, int);
+ case 'H':
+ return (unsigned char)va_arg(*ap, int);
+ default:
+ return va_arg(*ap, unsigned int);
+ }
+ }
+}
+
+static
+char get_sign(long long *num, int flags)
+{
+ if (!(flags & SIGN))
+ return 0;
+ if (*num < 0) {
+ *num = -(*num);
+ return '-';
+ }
+ if (flags & PLUS)
+ return '+';
+ if (flags & SPACE)
+ return ' ';
+ return 0;
+}
+
+static
+size_t utf16s_utf8nlen(const u16 *s16, size_t maxlen)
+{
+ size_t len, clen;
+
+ for (len = 0; len < maxlen && *s16; len += clen) {
+ u16 c0 = *s16++;
+
+ /* First, get the length for a BMP character */
+ clen = 1 + (c0 >= 0x80) + (c0 >= 0x800);
+ if (len + clen > maxlen)
+ break;
+ /*
+ * If this is a high surrogate, and we're already at maxlen, we
+ * can't include the character if it's a valid surrogate pair.
+ * Avoid accessing one extra word just to check if it's valid
+ * or not.
+ */
+ if ((c0 & 0xfc00) == 0xd800) {
+ if (len + clen == maxlen)
+ break;
+ if ((*s16 & 0xfc00) == 0xdc00) {
+ ++s16;
+ ++clen;
+ }
+ }
+ }
+
+ return len;
+}
+
+static
+u32 utf16_to_utf32(const u16 **s16)
+{
+ u16 c0, c1;
+
+ c0 = *(*s16)++;
+ /* not a surrogate */
+ if ((c0 & 0xf800) != 0xd800)
+ return c0;
+ /* invalid: low surrogate instead of high */
+ if (c0 & 0x0400)
+ return 0xfffd;
+ c1 = **s16;
+ /* invalid: missing low surrogate */
+ if ((c1 & 0xfc00) != 0xdc00)
+ return 0xfffd;
+ /* valid surrogate pair */
+ ++(*s16);
+ return (0x10000 - (0xd800 << 10) - 0xdc00) + (c0 << 10) + c1;
+}
+
+#define PUTC(c) \
+do { \
+ if (pos < size) \
+ buf[pos] = (c); \
+ ++pos; \
+} while (0);
+
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
+{
+ /* The maximum space required is to print a 64-bit number in octal */
+ char tmp[(sizeof(unsigned long long) * 8 + 2) / 3];
+ char *tmp_end = &tmp[ARRAY_SIZE(tmp)];
+ long long num;
+ int base;
+ const char *s;
+ size_t len, pos;
+ char sign;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+				   number of chars from string */
+ int qualifier; /* 'h', 'hh', 'l' or 'll' for integer fields */
+
+ va_list args;
+
+ /*
+ * We want to pass our input va_list to helper functions by reference,
+ * but there's an annoying edge case. If va_list was originally passed
+ * to us by value, we could just pass &ap down to the helpers. This is
+ * the case on, for example, X86_32.
+ * However, on X86_64 (and possibly others), va_list is actually a
+ * size-1 array containing a structure. Our function parameter ap has
+ * decayed from T[1] to T*, and &ap has type T** rather than T(*)[1],
+ * which is what will be expected by a function taking a va_list *
+ * parameter.
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then passing a pointer to that local
+ * copy instead, which is what we do here.
+ */
+ va_copy(args, ap);
+
+ for (pos = 0; *fmt; ++fmt) {
+ if (*fmt != '%' || *++fmt == '%') {
+ PUTC(*fmt);
+ continue;
+ }
+
+ /* process flags */
+ flags = get_flags(&fmt);
+
+ /* get field width */
+ field_width = get_int(&fmt, &args);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+
+ if (flags & LEFT)
+ flags &= ~ZEROPAD;
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ precision = get_int(&fmt, &args);
+ if (precision >= 0)
+ flags &= ~ZEROPAD;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l') {
+ qualifier = *fmt;
+ ++fmt;
+ if (qualifier == *fmt) {
+ qualifier -= 'a'-'A';
+ ++fmt;
+ }
+ }
+
+ sign = 0;
+
+ switch (*fmt) {
+ case 'c':
+ flags &= LEFT;
+ s = tmp;
+ if (qualifier == 'l') {
+ ((u16 *)tmp)[0] = (u16)va_arg(args, unsigned int);
+ ((u16 *)tmp)[1] = L'\0';
+ precision = INT_MAX;
+ goto wstring;
+ } else {
+ tmp[0] = (unsigned char)va_arg(args, int);
+ precision = len = 1;
+ }
+ goto output;
+
+ case 's':
+ flags &= LEFT;
+ if (precision < 0)
+ precision = INT_MAX;
+ s = va_arg(args, void *);
+ if (!s)
+ s = precision < 6 ? "" : "(null)";
+ else if (qualifier == 'l') {
+ wstring:
+ flags |= WIDE;
+ precision = len = utf16s_utf8nlen((const u16 *)s, precision);
+ goto output;
+ }
+ precision = len = strnlen(s, precision);
+ goto output;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'p':
+ if (precision < 0)
+ precision = 2 * sizeof(void *);
+ fallthrough;
+ case 'x':
+ flags |= SMALL;
+ fallthrough;
+ case 'X':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ fallthrough;
+ case 'u':
+ flags &= ~SPECIAL;
+ base = 10;
+ break;
+
+ default:
+ /*
+ * Bail out if the conversion specifier is invalid.
+ * There's probably a typo in the format string and the
+ * remaining specifiers are unlikely to match up with
+ * the arguments.
+ */
+ goto fail;
+ }
+ if (*fmt == 'p') {
+ num = (unsigned long)va_arg(args, void *);
+ } else {
+ num = get_number(flags & SIGN, qualifier, &args);
+ }
+
+ sign = get_sign(&num, flags);
+ if (sign)
+ --field_width;
+
+ s = number(tmp_end, num, base, flags & SMALL);
+ len = tmp_end - s;
+ /* default precision is 1 */
+ if (precision < 0)
+ precision = 1;
+ /* precision is minimum number of digits to print */
+ if (precision < len)
+ precision = len;
+ if (flags & SPECIAL) {
+ /*
+ * For octal, a leading 0 is printed only if necessary,
+ * i.e. if it's not already there because of the
+ * precision.
+ */
+ if (base == 8 && precision == len)
+ ++precision;
+ /*
+ * For hexadecimal, the leading 0x is skipped if the
+ * output is empty, i.e. both the number and the
+ * precision are 0.
+ */
+ if (base == 16 && precision > 0)
+ field_width -= 2;
+ else
+ flags &= ~SPECIAL;
+ }
+ /*
+ * For zero padding, increase the precision to fill the field
+ * width.
+ */
+ if ((flags & ZEROPAD) && field_width > precision)
+ precision = field_width;
+
+output:
+ /* Calculate the padding necessary */
+ field_width -= precision;
+ /* Leading padding with ' ' */
+ if (!(flags & LEFT))
+ while (field_width-- > 0)
+ PUTC(' ');
+ /* sign */
+ if (sign)
+ PUTC(sign);
+ /* 0x/0X for hexadecimal */
+ if (flags & SPECIAL) {
+ PUTC('0');
+ PUTC( 'X' | (flags & SMALL));
+ }
+ /* Zero padding and excess precision */
+ while (precision-- > len)
+ PUTC('0');
+ /* Actual output */
+ if (flags & WIDE) {
+ const u16 *ws = (const u16 *)s;
+
+ while (len-- > 0) {
+ u32 c32 = utf16_to_utf32(&ws);
+ u8 *s8;
+ size_t clen;
+
+ if (c32 < 0x80) {
+ PUTC(c32);
+ continue;
+ }
+
+ /* Number of trailing octets */
+ clen = 1 + (c32 >= 0x800) + (c32 >= 0x10000);
+
+ len -= clen;
+ s8 = (u8 *)&buf[pos];
+
+ /* Avoid writing partial character */
+ PUTC('\0');
+ pos += clen;
+ if (pos >= size)
+ continue;
+
+ /* Set high bits of leading octet */
+ *s8 = (0xf00 >> 1) >> clen;
+ /* Write trailing octets in reverse order */
+ for (s8 += clen; clen; --clen, c32 >>= 6)
+ *s8-- = 0x80 | (c32 & 0x3f);
+ /* Set low bits of leading octet */
+ *s8 |= c32;
+ }
+ } else {
+ while (len-- > 0)
+ PUTC(*s++);
+ }
+ /* Trailing padding with ' ' */
+ while (field_width-- > 0)
+ PUTC(' ');
+ }
+fail:
+ va_end(args);
+
+ if (size)
+ buf[min(pos, size-1)] = '\0';
+
+ return pos;
+}
+
+int snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return i;
+}
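put_dec_full4() replaces division by 10 with a reciprocal multiply: r / 10 == (r * 0xccd) >> 15 for every value the helper is handed (r < 16389, per the comment above). The identity is small enough to confirm exhaustively in user space:

#include <stdio.h>

int main(void)
{
	/* Verify the fixed-point reciprocal used by put_dec_full4(). */
	for (unsigned int r = 0; r < 16389; r++) {
		unsigned int q = (r * 0xccd) >> 15;

		if (q != r / 10) {
			printf("mismatch at r=%u: %u != %u\n", r, q, r / 10);
			return 1;
		}
	}
	printf("(r * 0xccd) >> 15 == r / 10 for all r < 16389\n");
	return 0;
}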
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
new file mode 100644
index 000000000..9f998e6bf
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+
+#include <asm/efi.h>
+#include <asm/e820/types.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/boot.h>
+
+#include "efistub.h"
+
+/* Maximum physical address for 64-bit kernel with 4-level paging */
+#define MAXMEM_X86_64_4LEVEL (1ull << 46)
+
+const efi_system_table_t *efi_system_table;
+extern u32 image_offset;
+static efi_loaded_image_t *image = NULL;
+
+static efi_status_t
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+{
+ struct pci_setup_rom *rom = NULL;
+ efi_status_t status;
+ unsigned long size;
+ uint64_t romsize;
+ void *romimage;
+
+ /*
+ * Some firmware images contain EFI function pointers at the place where
+ * the romimage and romsize fields are supposed to be. Typically the EFI
+ * code is mapped at high addresses, translating to an unrealistically
+ * large romsize. The UEFI spec limits the size of option ROMs to 16
+ * MiB so we reject any ROMs over 16 MiB in size to catch this.
+ */
+ romimage = efi_table_attr(pci, romimage);
+ romsize = efi_table_attr(pci, romsize);
+ if (!romimage || !romsize || romsize > SZ_16M)
+ return EFI_INVALID_PARAMETER;
+
+ size = romsize + sizeof(*rom);
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&rom);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'rom'\n");
+ return status;
+ }
+
+ memset(rom, 0, sizeof(*rom));
+
+ rom->data.type = SETUP_PCI;
+ rom->data.len = size - sizeof(struct setup_data);
+ rom->data.next = 0;
+ rom->pcilen = romsize;
+ *__rom = rom;
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_VENDOR_ID, 1, &rom->vendor);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read rom->vendor\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
+ PCI_DEVICE_ID, 1, &rom->devid);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to read rom->devid\n");
+ goto free_struct;
+ }
+
+ status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
+ &rom->device, &rom->function);
+
+ if (status != EFI_SUCCESS)
+ goto free_struct;
+
+ memcpy(rom->romdata, romimage, romsize);
+ return status;
+
+free_struct:
+ efi_bs_call(free_pool, rom);
+ return status;
+}
+
+/*
+ * There's no way to return an informative status from this function,
+ * because any analysis (and printing of error messages) needs to be
+ * done directly at the EFI function call-site.
+ *
+ * For example, EFI_INVALID_PARAMETER could indicate a bug or maybe we
+ * just didn't find any PCI devices, but there's no way to tell outside
+ * the context of the call.
+ */
+static void setup_efi_pci(struct boot_params *params)
+{
+ efi_status_t status;
+ void **pci_handle = NULL;
+ efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ unsigned long size = 0;
+ struct setup_data *data;
+ efi_handle_t h;
+ int i;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&pci_handle);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'pci_handle'\n");
+ return;
+ }
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &size, pci_handle);
+ }
+
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ for_each_efi_handle(h, pci_handle, size, i) {
+ efi_pci_io_protocol_t *pci = NULL;
+ struct pci_setup_rom *rom;
+
+ status = efi_bs_call(handle_protocol, h, &pci_proto,
+ (void **)&pci);
+ if (status != EFI_SUCCESS || !pci)
+ continue;
+
+ status = preserve_pci_rom_image(pci, &rom);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (data)
+ data->next = (unsigned long)rom;
+ else
+ params->hdr.setup_data = (unsigned long)rom;
+
+ data = (struct setup_data *)rom;
+ }
+
+free_handle:
+ efi_bs_call(free_pool, pci_handle);
+}
+
+static void retrieve_apple_device_properties(struct boot_params *boot_params)
+{
+ efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID;
+ struct setup_data *data, *new;
+ efi_status_t status;
+ u32 size = 0;
+ apple_properties_protocol_t *p;
+
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (efi_table_attr(p, version) != 0x10000) {
+ efi_err("Unsupported properties proto version\n");
+ return;
+ }
+
+ efi_call_proto(p, get_all, NULL, &size);
+ if (!size)
+ return;
+
+ do {
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ size + sizeof(struct setup_data),
+ (void **)&new);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate memory for 'properties'\n");
+ return;
+ }
+
+ status = efi_call_proto(p, get_all, new->data, &size);
+
+ if (status == EFI_BUFFER_TOO_SMALL)
+ efi_bs_call(free_pool, new);
+ } while (status == EFI_BUFFER_TOO_SMALL);
+
+ new->type = SETUP_APPLE_PROPERTIES;
+ new->len = size;
+ new->next = 0;
+
+ data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+ if (!data) {
+ boot_params->hdr.setup_data = (unsigned long)new;
+ } else {
+ while (data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+ data->next = (unsigned long)new;
+ }
+}
+
+static const efi_char16_t apple[] = L"Apple";
+
+static void setup_quirks(struct boot_params *boot_params)
+{
+ efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+ efi_table_attr(efi_system_table, fw_vendor);
+
+ if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+ }
+}
+
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
+static efi_status_t
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
+{
+ efi_status_t status;
+ u32 width, height;
+ void **uga_handle = NULL;
+ efi_uga_draw_protocol_t *uga = NULL, *first_uga;
+ efi_handle_t handle;
+ int i;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&uga_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ uga_proto, NULL, &size, uga_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ height = 0;
+ width = 0;
+
+ first_uga = NULL;
+ for_each_efi_handle(handle, uga_handle, size, i) {
+ efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
+ u32 w, h, depth, refresh;
+ void *pciio;
+
+ status = efi_bs_call(handle_protocol, handle, uga_proto,
+ (void **)&uga);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ pciio = NULL;
+ efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
+
+ status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
+ if (status == EFI_SUCCESS && (!first_uga || pciio)) {
+ width = w;
+ height = h;
+
+ /*
+ * Once we've found a UGA supporting PCIIO,
+ * don't bother looking any further.
+ */
+ if (pciio)
+ break;
+
+ first_uga = uga;
+ }
+ }
+
+ if (!width && !height)
+ goto free_handle;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_depth = 32;
+ si->lfb_width = width;
+ si->lfb_height = height;
+
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+
+free_handle:
+ efi_bs_call(free_pool, uga_handle);
+
+ return status;
+}
+
+static void setup_graphics(struct boot_params *boot_params)
+{
+ efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ struct screen_info *si;
+ efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ void **uga_handle = NULL;
+
+ si = &boot_params->screen_info;
+ memset(si, 0, sizeof(*si));
+
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &graphics_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ status = efi_setup_gop(si, &graphics_proto, size);
+
+ if (status != EFI_SUCCESS) {
+ size = 0;
+ status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &uga_proto, NULL, &size, uga_handle);
+ if (status == EFI_BUFFER_TOO_SMALL)
+ setup_uga(si, &uga_proto, size);
+ }
+}
+
+
+static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
+{
+ efi_bs_call(exit, handle, status, 0, NULL);
+ for(;;)
+ asm("hlt");
+}
+
+void startup_32(struct boot_params *boot_params);
+
+void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+/*
+ * Because the x86 boot code expects to be passed a boot_params, we
+ * need to create one ourselves (usually the bootloader would create
+ * one for us).
+ */
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ struct boot_params *boot_params;
+ struct setup_header *hdr;
+ void *image_base;
+ efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
+ int options_size = 0;
+ efi_status_t status;
+ char *cmdline_ptr;
+
+ efi_system_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
+ efi_exit(handle, status);
+ }
+
+ image_base = efi_table_attr(image, image_base);
+ image_offset = (void *)startup_32 - image_base;
+
+ status = efi_allocate_pages(sizeof(struct boot_params),
+ (unsigned long *)&boot_params, ULONG_MAX);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to allocate lowmem for boot params\n");
+ efi_exit(handle, status);
+ }
+
+ memset(boot_params, 0x0, sizeof(struct boot_params));
+
+ hdr = &boot_params->hdr;
+
+ /* Copy the setup header from the second sector to boot_params */
+ memcpy(&hdr->jump, image_base + 512,
+ sizeof(struct setup_header) - offsetof(struct setup_header, jump));
+
+ /*
+ * Fill out some of the header fields ourselves because the
+ * EFI firmware loader doesn't load the first sector.
+ */
+ hdr->root_flags = 1;
+ hdr->vid_mode = 0xffff;
+ hdr->boot_flag = 0xAA55;
+
+ hdr->type_of_loader = 0x21;
+
+ /* Convert unicode cmdline to ascii */
+ cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ if (!cmdline_ptr)
+ goto fail;
+
+ efi_set_u64_split((unsigned long)cmdline_ptr,
+ &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
+
+ hdr->ramdisk_image = 0;
+ hdr->ramdisk_size = 0;
+
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+ /* not reached */
+
+fail:
+ efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
+
+ efi_exit(handle, status);
+}
+
+static void add_e820ext(struct boot_params *params,
+ struct setup_data *e820ext, u32 nr_entries)
+{
+ struct setup_data *data;
+
+ e820ext->type = SETUP_E820_EXT;
+ e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+ e820ext->next = 0;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ if (data)
+ data->next = (unsigned long)e820ext;
+ else
+ params->hdr.setup_data = (unsigned long)e820ext;
+}
+
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
+{
+ struct boot_e820_entry *entry = params->e820_table;
+ struct efi_info *efi = &params->efi_info;
+ struct boot_e820_entry *prev = NULL;
+ u32 nr_entries;
+ u32 nr_desc;
+ int i;
+
+ nr_entries = 0;
+ nr_desc = efi->efi_memmap_size / efi->efi_memdesc_size;
+
+ for (i = 0; i < nr_desc; i++) {
+ efi_memory_desc_t *d;
+ unsigned int e820_type = 0;
+ unsigned long m = efi->efi_memmap;
+
+#ifdef CONFIG_X86_64
+ m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
+ d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i);
+ switch (d->type) {
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ case EFI_PAL_CODE:
+ e820_type = E820_TYPE_RESERVED;
+ break;
+
+ case EFI_UNUSABLE_MEMORY:
+ e820_type = E820_TYPE_UNUSABLE;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ e820_type = E820_TYPE_ACPI;
+ break;
+
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (efi_soft_reserve_enabled() &&
+ (d->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else
+ e820_type = E820_TYPE_RAM;
+ break;
+
+ case EFI_ACPI_MEMORY_NVS:
+ e820_type = E820_TYPE_NVS;
+ break;
+
+ case EFI_PERSISTENT_MEMORY:
+ e820_type = E820_TYPE_PMEM;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Merge adjacent mappings */
+ if (prev && prev->type == e820_type &&
+ (prev->addr + prev->size) == d->phys_addr) {
+ prev->size += d->num_pages << 12;
+ continue;
+ }
+
+ if (nr_entries == ARRAY_SIZE(params->e820_table)) {
+ u32 need = (nr_desc - i) * sizeof(struct e820_entry) +
+ sizeof(struct setup_data);
+
+ if (!e820ext || e820ext_size < need)
+ return EFI_BUFFER_TOO_SMALL;
+
+ /* boot_params map full, switch to e820 extended */
+ entry = (struct boot_e820_entry *)e820ext->data;
+ }
+
+ entry->addr = d->phys_addr;
+ entry->size = d->num_pages << PAGE_SHIFT;
+ entry->type = e820_type;
+ prev = entry++;
+ nr_entries++;
+ }
+
+ if (nr_entries > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table);
+
+ add_e820ext(params, e820ext, nr_e820ext);
+ nr_entries -= nr_e820ext;
+ }
+
+ params->e820_entries = (u8)nr_entries;
+
+ return EFI_SUCCESS;
+}
+
+static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(struct setup_data) +
+ sizeof(struct e820_entry) * nr_desc;
+
+ if (*e820ext) {
+ efi_bs_call(free_pool, *e820ext);
+ *e820ext = NULL;
+ *e820ext_size = 0;
+ }
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)e820ext);
+ if (status == EFI_SUCCESS)
+ *e820ext_size = size;
+
+ return status;
+}
+
+static efi_status_t allocate_e820(struct boot_params *params,
+ struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ unsigned long map_size, desc_size, map_key;
+ efi_status_t status;
+ __u32 nr_desc, desc_version;
+
+ /* Only need the size of the mem map and size of each mem descriptor */
+ map_size = 0;
+ status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key,
+ &desc_size, &desc_version);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED;
+
+ nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS;
+
+ if (nr_desc > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+
+ status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ return EFI_SUCCESS;
+}
+
+struct exit_boot_struct {
+ struct boot_params *boot_params;
+ struct efi_info *efi;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
+ void *priv)
+{
+ const char *signature;
+ struct exit_boot_struct *p = priv;
+
+ signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+ : EFI32_LOADER_SIGNATURE;
+ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
+
+ efi_set_u64_split((unsigned long)efi_system_table,
+ &p->efi->efi_systab, &p->efi->efi_systab_hi);
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ efi_set_u64_split((unsigned long)*map->map,
+ &p->efi->efi_memmap, &p->efi->efi_memmap_hi);
+ p->efi->efi_memmap_size = *map->map_size;
+
+ return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
+{
+ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext = NULL;
+ __u32 e820ext_size = 0;
+ efi_status_t status;
+ __u32 desc_version;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+
+ status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Might as well exit boot services now */
+ status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+ status = setup_e820(boot_params, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return EFI_SUCCESS;
+}
+
+/*
+ * On success, we return the address of startup_32, which has potentially been
+ * relocated by efi_relocate_kernel.
+ * On failure, we exit to the firmware via efi_exit instead of returning.
+ */
+unsigned long efi_main(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+{
+ unsigned long bzimage_addr = (unsigned long)startup_32;
+ unsigned long buffer_start, buffer_end;
+ struct setup_header *hdr = &boot_params->hdr;
+ efi_status_t status;
+
+ efi_system_table = sys_table_arg;
+
+ /* Check if we were booted by the EFI firmware */
+ if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
+ efi_exit(handle, EFI_INVALID_PARAMETER);
+
+ /*
+ * If the kernel isn't already loaded at a suitable address,
+ * relocate it.
+ *
+ * It must be loaded above LOAD_PHYSICAL_ADDR.
+ *
+ * The maximum address for 64-bit is 1 << 46 for 4-level paging. This
+ * is defined as the macro MAXMEM, but unfortunately that is not a
+ * compile-time constant if 5-level paging is configured, so we instead
+ * define our own macro for use here.
+ *
+	 * For 32-bit, the maximum address is complicated to figure out; for
+	 * now, use KERNEL_IMAGE_SIZE, which will be 512 MiB, the same as what
+	 * KASLR uses.
+ *
+ * Also relocate it if image_offset is zero, i.e. the kernel wasn't
+ * loaded by LoadImage, but rather by a bootloader that called the
+ * handover entry. The reason we must always relocate in this case is
+ * to handle the case of systemd-boot booting a unified kernel image,
+ * which is a PE executable that contains the bzImage and an initrd as
+ * COFF sections. The initrd section is placed after the bzImage
+ * without ensuring that there are at least init_size bytes available
+ * for the bzImage, and thus the compressed kernel's startup code may
+ * overwrite the initrd unless it is moved out of the way.
+ */
+
+ buffer_start = ALIGN(bzimage_addr - image_offset,
+ hdr->kernel_alignment);
+ buffer_end = buffer_start + hdr->init_size;
+
+ if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
+ (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
+ (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
+ (image_offset == 0)) {
+ status = efi_relocate_kernel(&bzimage_addr,
+ hdr->init_size, hdr->init_size,
+ hdr->pref_address,
+ hdr->kernel_alignment,
+ LOAD_PHYSICAL_ADDR);
+ if (status != EFI_SUCCESS) {
+ efi_err("efi_relocate_kernel() failed!\n");
+ goto fail;
+ }
+ /*
+ * Now that we've copied the kernel elsewhere, we no longer
+		 * have a setup block before startup_32(), so reset image_offset
+ * to zero in case it was set earlier.
+ */
+ image_offset = 0;
+ }
+
+#ifdef CONFIG_CMDLINE_BOOL
+ status = efi_parse_options(CONFIG_CMDLINE);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+#endif
+ if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ status = efi_parse_options((char *)cmdline_paddr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to parse options\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * At this point, an initrd may already have been loaded by the
+ * bootloader and passed via bootparams. We permit an initrd loaded
+ * from the LINUX_EFI_INITRD_MEDIA_GUID device path to supersede it.
+ *
+ * If the device path is not present, any command-line initrd=
+ * arguments will be processed only if image is not NULL, which will be
+ * the case only if we were loaded via the PE entry point.
+ */
+ if (!efi_noinitrd) {
+ unsigned long addr, size;
+
+ status = efi_load_initrd(image, &addr, &size,
+ hdr->initrd_addr_max, ULONG_MAX);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to load initrd!\n");
+ goto fail;
+ }
+ if (size > 0) {
+ efi_set_u64_split(addr, &hdr->ramdisk_image,
+ &boot_params->ext_ramdisk_image);
+ efi_set_u64_split(size, &hdr->ramdisk_size,
+ &boot_params->ext_ramdisk_size);
+ }
+ }
+
+ /*
+ * If the boot loader gave us a value for secure_boot then we use that,
+ * otherwise we ask the BIOS.
+ */
+ if (boot_params->secure_boot == efi_secureboot_mode_unset)
+ boot_params->secure_boot = efi_get_secureboot();
+
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation();
+
+ efi_random_get_seed();
+
+ efi_retrieve_tpm2_eventlog();
+
+ setup_graphics(boot_params);
+
+ setup_efi_pci(boot_params);
+
+ setup_quirks(boot_params);
+
+ status = exit_boot(boot_params, handle);
+ if (status != EFI_SUCCESS) {
+ efi_err("exit_boot() failed!\n");
+ goto fail;
+ }
+
+ return bzimage_addr;
+fail:
+ efi_err("efi_main() failed!\n");
+
+ efi_exit(handle, status);
+}
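setup_e820() coalesces adjacent EFI descriptors that map to the same e820 type before spilling into the SETUP_E820_EXT blob once the fixed table fills up. The merge test is simply "same type and physically contiguous"; the sketch below applies it to made-up regions (the struct and type codes are simplified stand-ins, not the real boot_e820_entry):

#include <stdio.h>
#include <stdint.h>

struct e820_ent { uint64_t addr, size; uint32_t type; };	/* simplified stand-in */

/* Append a region, merging with the previous entry when type and range line up. */
static int e820_add(struct e820_ent *tbl, int n,
		    uint64_t addr, uint64_t size, uint32_t type)
{
	if (n && tbl[n - 1].type == type &&
	    tbl[n - 1].addr + tbl[n - 1].size == addr) {
		tbl[n - 1].size += size;
		return n;
	}
	tbl[n].addr = addr;
	tbl[n].size = size;
	tbl[n].type = type;
	return n + 1;
}

int main(void)
{
	struct e820_ent tbl[8];
	int n = 0;

	n = e820_add(tbl, n, 0x000000, 0x1000, 1);	/* RAM */
	n = e820_add(tbl, n, 0x001000, 0x2000, 1);	/* contiguous RAM: merged */
	n = e820_add(tbl, n, 0x100000, 0x1000, 2);	/* reserved: new entry */

	for (int i = 0; i < n; i++)
		printf("%#llx + %#llx type %u\n",
		       (unsigned long long)tbl[i].addr,
		       (unsigned long long)tbl[i].size, tbl[i].type);
	return 0;
}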
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 000000000..f178b2984
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#define pr_fmt(fmt) "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+ efi_memory_attributes_table_t *tbl;
+
+ if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi_mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 2) {
+ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ tbl->version);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+ memblock_reserve(efi_mem_attr_table, tbl_size);
+ set_bit(EFI_MEM_ATTR, &efi.flags);
+
+unmap:
+ early_memunmap(tbl, sizeof(*tbl));
+ return 0;
+}
+
+/*
+ * Returns true, and a copy of the UEFI memory descriptor @in in @out, if @in
+ * is covered entirely by a UEFI memory map entry with matching attributes.
+ * The virtual address of @out is set according to the matching entry that
+ * was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+ u64 in_paddr = in->phys_addr;
+ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+ efi_memory_desc_t *md;
+
+ *out = *in;
+
+ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+ in->type != EFI_RUNTIME_SERVICES_DATA) {
+ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+ return false;
+ }
+
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+ /*
+ * Since arm64 may execute with page sizes of up to 64 KB, the
+ * UEFI spec mandates that RuntimeServices memory regions must
+ * be 64 KB aligned. We need to validate this here since we will
+ * not be able to tighten permissions on such regions without
+ * affecting adjacent regions.
+ */
+ pr_warn("Entry address region misaligned\n");
+ return false;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 md_paddr = md->phys_addr;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0 && md->phys_addr != 0) {
+ /* no virtual mapping has been installed by the stub */
+ break;
+ }
+
+ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+ continue;
+
+ /*
+ * This entry covers the start of @in, check whether
+ * it covers the end as well.
+ */
+ if (md_paddr + md_size < in_paddr + in_size) {
+ pr_warn("Entry covers multiple EFI memory map regions\n");
+ return false;
+ }
+
+ if (md->type != in->type) {
+ pr_warn("Entry type deviates from EFI memory map region type\n");
+ return false;
+ }
+
+ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+ return true;
+ }
+
+ pr_warn("No matching entry found in the EFI memory map\n");
+ return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn)
+{
+ efi_memory_attributes_table_t *tbl;
+ int i, ret;
+
+ if (tbl_size <= sizeof(*tbl))
+ return 0;
+
+ /*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+	 * Memory Attributes table. If it isn't available, this
+ * function should not be called.
+ */
+ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+ return 0;
+
+ tbl = memremap(efi_mem_attr_table, tbl_size, MEMREMAP_WB);
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi_mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI Memory Attributes table:\n");
+
+ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+ efi_memory_desc_t md;
+ unsigned long size;
+ bool valid;
+ char buf[64];
+
+ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ &md);
+ size = md.num_pages << EFI_PAGE_SHIFT;
+ if (efi_enabled(EFI_DBG) || !valid)
+ pr_info("%s 0x%012llx-0x%012llx %s\n",
+ valid ? "" : "!", md.phys_addr,
+ md.phys_addr + size - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+ if (valid) {
+ ret = fn(mm, &md);
+ if (ret)
+ pr_err("Error updating mappings, skipping subsequent md's\n");
+ }
+ }
+ memunmap(tbl);
+ return ret;
+}
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
new file mode 100644
index 000000000..2ff1883dc
--- /dev/null
+++ b/drivers/firmware/efi/memmap.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EFI memory map functions.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+ return memblock_phys_alloc(size, SMP_CACHE_BYTES);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (!p)
+ return 0;
+
+ return PFN_PHYS(page_to_pfn(p));
+}
+
+void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
+{
+ if (flags & EFI_MEMMAP_MEMBLOCK) {
+ if (slab_is_available())
+ memblock_free_late(phys, size);
+ else
+ memblock_free(phys, size);
+ } else if (flags & EFI_MEMMAP_SLAB) {
+ struct page *p = pfn_to_page(PHYS_PFN(phys));
+ unsigned int order = get_order(size);
+
+ free_pages((unsigned long) page_address(p), order);
+ }
+}
+
+static void __init efi_memmap_free(void)
+{
+ __efi_memmap_free(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map,
+ efi.memmap.flags);
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ * @data: efi memmap installation parameters
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns zero on success, a negative error code on failure. On success,
+ * the physical address of the allocated map is returned in @data->phys_map.
+ */
+int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data)
+{
+	/* Expect the allocation parameters to be zero-initialized */
+ WARN_ON(data->phys_map || data->size);
+
+ data->size = num_entries * efi.memmap.desc_size;
+ data->desc_version = efi.memmap.desc_version;
+ data->desc_size = efi.memmap.desc_size;
+ data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
+ data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;
+
+ if (slab_is_available()) {
+ data->flags |= EFI_MEMMAP_SLAB;
+ data->phys_map = __efi_memmap_alloc_late(data->size);
+ } else {
+ data->flags |= EFI_MEMMAP_MEMBLOCK;
+ data->phys_map = __efi_memmap_alloc_early(data->size);
+ }
+
+ if (!data->phys_map)
+ return -ENOMEM;
+ return 0;
+}
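+
+/*
+ * Illustrative use (a sketch, not a caller in this file): code that wants to
+ * install a modified map typically allocates room for the new entries, fills
+ * them in, and then hands the result to efi_memmap_install():
+ *
+ *	struct efi_memory_map_data data = {};
+ *
+ *	if (!efi_memmap_alloc(nr_entries, &data)) {
+ *		// map data.phys_map and populate the new descriptors ...
+ *		efi_memmap_install(&data);
+ *	}
+ *
+ * (nr_entries and the population step are placeholders.)
+ */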
+
+/**
+ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
+ *
+ * This function takes care of figuring out which function to use to
+ * map the EFI memory map in efi.memmap based on how far into the boot
+ * we are.
+ *
+ * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we
+ * only have access to the early_memremap*() functions as the vmalloc
+ * space isn't set up. Once the kernel is fully booted, we can fall back
+ * to the more robust memremap*() API.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+static int __init __efi_memmap_init(struct efi_memory_map_data *data)
+{
+ struct efi_memory_map map;
+ phys_addr_t phys_map;
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+ phys_map = data->phys_map;
+
+ if (data->flags & EFI_MEMMAP_LATE)
+ map.map = memremap(phys_map, data->size, MEMREMAP_WB);
+ else
+ map.map = early_memremap(phys_map, data->size);
+
+ if (!map.map) {
+ pr_err("Could not map the memory map!\n");
+ return -ENOMEM;
+ }
+
+	/* NOP if (data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB)) == 0 */
+ efi_memmap_free();
+
+ map.phys_map = data->phys_map;
+ map.nr_map = data->size / data->desc_size;
+ map.map_end = map.map + data->size;
+
+ map.desc_version = data->desc_version;
+ map.desc_size = data->desc_size;
+ map.flags = data->flags;
+
+ set_bit(EFI_MEMMAP, &efi.flags);
+
+ efi.memmap = map;
+
+ return 0;
+}
+
+/**
+ * efi_memmap_init_early - Map the EFI memory map data structure
+ * @data: EFI memory map data
+ *
+ * Use early_memremap() to map the passed in EFI memory map and assign
+ * it to efi.memmap.
+ */
+int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+{
+ /* Cannot go backwards */
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
+
+ data->flags = 0;
+ return __efi_memmap_init(data);
+}
+
+void __init efi_memmap_unmap(void)
+{
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
+ unsigned long size;
+
+ size = efi.memmap.desc_size * efi.memmap.nr_map;
+ early_memunmap(efi.memmap.map, size);
+ } else {
+ memunmap(efi.memmap.map);
+ }
+
+ efi.memmap.map = NULL;
+ clear_bit(EFI_MEMMAP, &efi.flags);
+}
+
+/**
+ * efi_memmap_init_late - Map efi.memmap with memremap()
+ * @addr: Physical address of the new EFI memory map
+ * @size: Size in bytes of the new EFI memory map
+ *
+ * Set up a mapping of the EFI memory map using memremap(). This
+ * function should only be called once the vmalloc space has been
+ * set up and is therefore not suitable for calling during early EFI
+ * initialisation, e.g. in efi_init(). Additionally, it expects
+ * efi_memmap_init_early() to have already been called.
+ *
+ * The reason there are two EFI memmap initialisation routines
+ * (efi_memmap_init_early() and this late version) is that the
+ * early EFI memmap should be explicitly unmapped once EFI
+ * initialisation is complete, as the fixmap space used to map the EFI
+ * memmap (via early_memremap()) is a scarce resource.
+ *
+ * This late mapping is intended to persist for the duration of
+ * runtime so that things like efi_mem_desc_lookup() and
+ * efi_mem_attributes() always work.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+{
+ struct efi_memory_map_data data = {
+ .phys_map = addr,
+ .size = size,
+ .flags = EFI_MEMMAP_LATE,
+ };
+
+ /* Did we forget to unmap the early EFI memmap? */
+ WARN_ON(efi.memmap.map);
+
+ /* Were we already called? */
+ WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
+
+ /*
+ * It makes no sense to allow callers to register different
+ * values for the following fields. Copy them out of the
+ * existing early EFI memmap.
+ */
+ data.desc_version = efi.memmap.desc_version;
+ data.desc_size = efi.memmap.desc_size;
+
+ return __efi_memmap_init(&data);
+}
+
+/**
+ * efi_memmap_install - Install a new EFI memory map in efi.memmap
+ * @data: map allocation parameters (address, size, flags)
+ *
+ * Unlike efi_memmap_init_*(), this function does not allow the caller
+ * to switch from early to late mappings. It simply uses the existing
+ * mapping function and installs the new memmap.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_install(struct efi_memory_map_data *data)
+{
+ efi_memmap_unmap();
+
+ return __efi_memmap_init(data);
+}
+
+/**
+ * efi_memmap_split_count - Count number of additional EFI memmap entries
+ * @md: EFI memory descriptor to split
+ * @range: Address range (start, end) to split around
+ *
+ * Returns the number of additional EFI memmap entries required to
+ * accommodate @range.
+ */
+int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
+{
+ u64 m_start, m_end;
+ u64 start, end;
+ int count = 0;
+
+ start = md->phys_addr;
+ end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ /* modifying range */
+ m_start = range->start;
+ m_end = range->end;
+
+ if (m_start <= start) {
+ /* split into 2 parts */
+ if (start < m_end && m_end < end)
+ count++;
+ }
+
+ if (start < m_start && m_start < end) {
+ /* split into 3 parts */
+ if (m_end < end)
+ count += 2;
+ /* split into 2 parts */
+ if (end <= m_end)
+ count++;
+ }
+
+ return count;
+}
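+
+/*
+ * Worked example (illustrative): for a descriptor covering [0x0000, 0x3fff]
+ * and a modifying range of [0x1000, 0x2fff], the region splits into three
+ * parts (before, inside, after), so two additional entries are needed and
+ * this function returns 2.
+ */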
+
+/**
+ * efi_memmap_insert - Insert a memory region in an EFI memmap
+ * @old_memmap: The existing EFI memory map structure
+ * @buf: Address of buffer to store new map
+ * @mem: Memory map entry to insert
+ *
+ * It is suggested that you call efi_memmap_split_count() first
+ * to see how large @buf needs to be.
+ */
+void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
+ struct efi_mem_range *mem)
+{
+ u64 m_start, m_end, m_attr;
+ efi_memory_desc_t *md;
+ u64 start, end;
+ void *old, *new;
+
+ /* modifying range */
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ m_attr = mem->attribute;
+
+ /*
+ * The EFI memory map deals with regions in EFI_PAGE_SIZE
+ * units. Ensure that the region described by 'mem' is aligned
+ * correctly.
+ */
+ if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
+ !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
+ WARN_ON(1);
+ return;
+ }
+
+ for (old = old_memmap->map, new = buf;
+ old < old_memmap->map_end;
+ old += old_memmap->desc_size, new += old_memmap->desc_size) {
+
+ /* copy original EFI memory descriptor */
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ if (m_start <= start && end <= m_end)
+ md->attribute |= m_attr;
+
+ if (m_start <= start &&
+ (start < m_end && m_end < end)) {
+ /* first part */
+ md->attribute |= m_attr;
+ md->num_pages = (m_end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ }
+
+ if ((start < m_start && m_start < end) && m_end < end) {
+ /* first part */
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* middle part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->attribute |= m_attr;
+ md->phys_addr = m_start;
+ md->num_pages = (m_end - m_start + 1) >>
+ EFI_PAGE_SHIFT;
+ /* last part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - m_end) >>
+ EFI_PAGE_SHIFT;
+ }
+
+ if ((start < m_start && m_start < end) &&
+ (end <= m_end)) {
+ /* first part */
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_start;
+ md->num_pages = (end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ md->attribute |= m_attr;
+ }
+ }
+}
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
new file mode 100644
index 000000000..38722d200
--- /dev/null
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mokvar-table.c
+ *
+ * Copyright (c) 2020 Red Hat
+ * Author: Lenny Szubowicz <lszubowi@redhat.com>
+ *
+ * This module contains the kernel support for the Linux EFI Machine
+ * Owner Key (MOK) variable configuration table, which is identified by
+ * the LINUX_EFI_MOK_VARIABLE_TABLE_GUID.
+ *
+ * This EFI configuration table provides a more robust alternative to
+ * EFI volatile variables by which an EFI boot loader can pass the
+ * contents of the Machine Owner Key (MOK) certificate stores to the
+ * kernel during boot. If both the EFI MOK config table and corresponding
+ * EFI MOK variables are present, the table should be considered
+ * more authoritative.
+ *
+ * This module includes code that validates and maps the EFI MOK table,
+ * if its presence was detected very early in boot.
+ *
+ * Kernel interface routines are provided to walk through all the
+ * entries in the MOK config table or to search for a specific named
+ * entry.
+ *
+ * The contents of the individual named MOK config table entries are
+ * made available to user space via read-only sysfs binary files under:
+ *
+ * /sys/firmware/efi/mok-variables/
+ *
+ */
+#define pr_fmt(fmt) "mokvar: " fmt
+
+#include <linux/capability.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table is a packed
+ * sequence of struct efi_mokvar_table_entry, one for each named
+ * MOK variable. The sequence is terminated by an entry with a
+ * completely NULL name and 0 data size.
+ *
+ * efi_mokvar_table_size is set to the computed size of the
+ * MOK config table by efi_mokvar_table_init(). This will be
+ * non-zero if and only if the table if present and has been
+ * validated by efi_mokvar_table_init().
+ */
+static size_t efi_mokvar_table_size;
+
+/*
+ * efi_mokvar_table_va is the kernel virtual address at which the
+ * EFI MOK config table has been mapped by efi_mokvar_sysfs_init().
+ */
+static struct efi_mokvar_table_entry *efi_mokvar_table_va;
+
+/*
+ * Each /sys/firmware/efi/mok-variables/ sysfs file is represented by
+ * an instance of struct efi_mokvar_sysfs_attr on efi_mokvar_sysfs_list.
+ * bin_attr.private points to the associated EFI MOK config table entry.
+ *
+ * This list is created during boot and then remains unchanged.
+ * So no synchronization is currently required to walk the list.
+ */
+struct efi_mokvar_sysfs_attr {
+ struct bin_attribute bin_attr;
+ struct list_head node;
+};
+
+static LIST_HEAD(efi_mokvar_sysfs_list);
+static struct kobject *mokvar_kobj;
+
+/*
+ * efi_mokvar_table_init() - Early boot validation of EFI MOK config table
+ *
+ * If present, validate and compute the size of the EFI MOK variable
+ * configuration table. This table may be provided by an EFI boot loader
+ * as an alternative to ordinary EFI variables, due to platform-dependent
+ * limitations. The memory occupied by this table is marked as reserved.
+ *
+ * This routine must be called before efi_free_boot_services() in order
+ * to guarantee that it can mark the table as reserved.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ */
+void __init efi_mokvar_table_init(void)
+{
+ efi_memory_desc_t md;
+ void *va = NULL;
+ unsigned long cur_offset = 0;
+ unsigned long offset_limit;
+ unsigned long map_size = 0;
+ unsigned long map_size_needed = 0;
+ unsigned long size;
+ struct efi_mokvar_table_entry *mokvar_entry;
+ int err;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (efi.mokvar_table == EFI_INVALID_TABLE_ADDR)
+ return;
+ /*
+ * The EFI MOK config table must fit within a single EFI memory
+ * descriptor range.
+ */
+ err = efi_mem_desc_lookup(efi.mokvar_table, &md);
+ if (err) {
+ pr_warn("EFI MOKvar config table is not within the EFI memory map\n");
+ return;
+ }
+
+ offset_limit = efi_mem_desc_end(&md) - efi.mokvar_table;
+
+ /*
+ * Validate the MOK config table. Since there is no table header
+ * from which we could get the total size of the MOK config table,
+ * we compute the total size as we validate each variably sized
+ * entry, remapping as necessary.
+ */
+ err = -EINVAL;
+ while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
+ mokvar_entry = va + cur_offset;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
+ if (map_size_needed > map_size) {
+ if (va)
+ early_memunmap(va, map_size);
+ /*
+			 * Map a little more than the fixed-size entry
+			 * header, anticipating some data. It's safe to
+			 * do so as long as we stay within the current
+			 * memory descriptor.
+ */
+ map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
+ offset_limit);
+ va = early_memremap(efi.mokvar_table, map_size);
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
+ efi.mokvar_table, map_size);
+ return;
+ }
+ mokvar_entry = va + cur_offset;
+ }
+
+ /* Check for last sentinel entry */
+ if (mokvar_entry->name[0] == '\0') {
+ if (mokvar_entry->data_size != 0)
+ break;
+ err = 0;
+ break;
+ }
+
+ /* Sanity check that the name is null terminated */
+ size = strnlen(mokvar_entry->name,
+ sizeof(mokvar_entry->name));
+ if (size >= sizeof(mokvar_entry->name))
+ break;
+
+ /* Advance to the next entry */
+ cur_offset = map_size_needed + mokvar_entry->data_size;
+ }
+
+ if (va)
+ early_memunmap(va, map_size);
+ if (err) {
+ pr_err("EFI MOKvar config table is not valid\n");
+ return;
+ }
+
+ if (md.type == EFI_BOOT_SERVICES_DATA)
+ efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
+ efi_mokvar_table_size = map_size_needed;
+}
+
+/*
+ * efi_mokvar_entry_next() - Get next entry in the EFI MOK config table
+ *
+ * mokvar_entry: Pointer to current EFI MOK config table entry
+ * or null. Null indicates get first entry.
+ * Passed by reference. This is updated to the
+ * same value as the return value.
+ *
+ * Returns: Pointer to next EFI MOK config table entry
+ * or null, if there are no more entries.
+ * Same value is returned in the mokvar_entry
+ * parameter.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ struct efi_mokvar_table_entry *mokvar_cur;
+ struct efi_mokvar_table_entry *mokvar_next;
+ size_t size_cur;
+
+ mokvar_cur = *mokvar_entry;
+ *mokvar_entry = NULL;
+
+ if (efi_mokvar_table_va == NULL)
+ return NULL;
+
+ if (mokvar_cur == NULL) {
+ mokvar_next = efi_mokvar_table_va;
+ } else {
+ if (mokvar_cur->name[0] == '\0')
+ return NULL;
+ size_cur = sizeof(*mokvar_cur) + mokvar_cur->data_size;
+ mokvar_next = (void *)mokvar_cur + size_cur;
+ }
+
+ if (mokvar_next->name[0] == '\0')
+ return NULL;
+
+ *mokvar_entry = mokvar_next;
+ return mokvar_next;
+}
+
+/*
+ * efi_mokvar_entry_find() - Find EFI MOK config entry by name
+ *
+ * name: Name of the entry to look for.
+ *
+ * Returns: Pointer to EFI MOK config table entry if found;
+ * null otherwise.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ if (!strncmp(name, mokvar_entry->name,
+ sizeof(mokvar_entry->name)))
+ return mokvar_entry;
+ }
+ return NULL;
+}
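+
+/*
+ * Example (illustrative): efi_mokvar_entry_find("MokListRT") returns the
+ * entry carrying the MOK certificate list passed by the boot loader, or
+ * NULL if the table or that entry is absent.
+ */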
+
+/*
+ * efi_mokvar_sysfs_read() - sysfs binary file read routine
+ *
+ * Returns: Count of bytes read.
+ *
+ * Copy EFI MOK config table entry data for this mokvar sysfs binary file
+ * to the supplied buffer, starting at the specified offset into mokvar table
+ * entry data, for the specified number of bytes. The copy is limited by the
+ * amount of data in this mokvar config table entry.
+ */
+static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (off >= mokvar_entry->data_size)
+ return 0;
+ if (count > mokvar_entry->data_size - off)
+ count = mokvar_entry->data_size - off;
+
+ memcpy(buf, mokvar_entry->data + off, count);
+ return count;
+}
+
+/*
+ * efi_mokvar_sysfs_init() - Map EFI MOK config table and create sysfs
+ *
+ * Map the EFI MOK variable config table for run-time use by the kernel
+ * and create the sysfs entries in /sys/firmware/efi/mok-variables/
+ *
+ * This routine just returns if a valid EFI MOK variable config table
+ * was not found earlier during boot.
+ *
+ * This routine must be called during a "middle" initcall phase, i.e.
+ * after efi_mokvar_table_init() but before UEFI certs are loaded
+ * during late init.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_va: Start virtual address of the EFI MOK config table.
+ */
+static int __init efi_mokvar_sysfs_init(void)
+{
+ void *config_va;
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+ struct efi_mokvar_sysfs_attr *mokvar_sysfs = NULL;
+ int err = 0;
+
+ if (efi_mokvar_table_size == 0)
+ return -ENOENT;
+
+ config_va = memremap(efi.mokvar_table, efi_mokvar_table_size,
+ MEMREMAP_WB);
+ if (!config_va) {
+ pr_err("Failed to map EFI MOKvar config table\n");
+ return -ENOMEM;
+ }
+ efi_mokvar_table_va = config_va;
+
+ mokvar_kobj = kobject_create_and_add("mok-variables", efi_kobj);
+ if (!mokvar_kobj) {
+ pr_err("Failed to create EFI mok-variables sysfs entry\n");
+ return -ENOMEM;
+ }
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ mokvar_sysfs = kzalloc(sizeof(*mokvar_sysfs), GFP_KERNEL);
+ if (!mokvar_sysfs) {
+ err = -ENOMEM;
+ break;
+ }
+
+ sysfs_bin_attr_init(&mokvar_sysfs->bin_attr);
+ mokvar_sysfs->bin_attr.private = mokvar_entry;
+ mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
+ mokvar_sysfs->bin_attr.attr.mode = 0400;
+ mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
+ mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
+
+ err = sysfs_create_bin_file(mokvar_kobj,
+ &mokvar_sysfs->bin_attr);
+ if (err)
+ break;
+
+ list_add_tail(&mokvar_sysfs->node, &efi_mokvar_sysfs_list);
+ }
+
+ if (err) {
+ pr_err("Failed to create some EFI mok-variables sysfs entries\n");
+ kfree(mokvar_sysfs);
+ }
+ return err;
+}
+device_initcall(efi_mokvar_sysfs_init);
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
new file mode 100644
index 000000000..de1a9a1f9
--- /dev/null
+++ b/drivers/firmware/efi/rci2-table.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Export Runtime Configuration Interface Table Version 2 (RCI2)
+ * to sysfs
+ *
+ * Copyright (C) 2019 Dell Inc
+ * by Narendra K <Narendra.K@dell.com>
+ *
+ * System firmware advertises the address of the RCI2 Table via
+ * an EFI Configuration Table entry. This code retrieves the RCI2
+ * table from the address and exports it to sysfs as a binary
+ * attribute 'rci2' under /sys/firmware/efi/tables directory.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/efi.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define RCI_SIGNATURE "_RC_"
+
+struct rci2_table_global_hdr {
+ u16 type;
+ u16 resvd0;
+ u16 hdr_len;
+ u8 rci2_sig[4];
+ u16 resvd1;
+ u32 resvd2;
+ u32 resvd3;
+ u8 major_rev;
+ u8 minor_rev;
+ u16 num_of_structs;
+ u32 rci2_len;
+ u16 rci2_chksum;
+} __packed;
+
+static u8 *rci2_base;
+static u32 rci2_table_len;
+unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
+
+static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ memcpy(buf, attr->private + pos, count);
+ return count;
+}
+
+static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0);
+
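+/*
+ * Sum the table as 16-bit words, padding a trailing odd byte with zero.
+ * The table is accepted by efi_rci2_sysfs_init() only when this sum is 0.
+ */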
+static u16 checksum(void)
+{
+ u8 len_is_odd = rci2_table_len % 2;
+ u32 chksum_len = rci2_table_len;
+ u16 *base = (u16 *)rci2_base;
+ u8 buf[2] = {0};
+ u32 offset = 0;
+ u16 chksum = 0;
+
+ if (len_is_odd)
+ chksum_len -= 1;
+
+ while (offset < chksum_len) {
+ chksum += *base;
+ offset += 2;
+ base++;
+ }
+
+ if (len_is_odd) {
+ buf[0] = *(u8 *)base;
+ chksum += *(u16 *)(buf);
+ }
+
+ return chksum;
+}
+
+static int __init efi_rci2_sysfs_init(void)
+{
+ struct kobject *tables_kobj;
+ int ret = -ENOMEM;
+
+ if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ rci2_base = memremap(rci2_table_phys,
+ sizeof(struct rci2_table_global_hdr),
+ MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table init failed - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (strncmp(rci2_base +
+ offsetof(struct rci2_table_global_hdr, rci2_sig),
+ RCI_SIGNATURE, 4)) {
+ pr_debug("RCI2 table init failed - incorrect signature\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ rci2_table_len = *(u32 *)(rci2_base +
+ offsetof(struct rci2_table_global_hdr,
+ rci2_len));
+
+ memunmap(rci2_base);
+
+ if (!rci2_table_len) {
+ pr_debug("RCI2 table init failed - incorrect table length\n");
+ goto err;
+ }
+
+ rci2_base = memremap(rci2_table_phys, rci2_table_len, MEMREMAP_WB);
+ if (!rci2_base) {
+ pr_debug("RCI2 table - could not map RCI2 table\n");
+ goto err;
+ }
+
+ if (checksum() != 0) {
+ pr_debug("RCI2 table - incorrect checksum\n");
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ tables_kobj = kobject_create_and_add("tables", efi_kobj);
+ if (!tables_kobj) {
+ pr_debug("RCI2 table - tables_kobj creation failed\n");
+ goto err_unmap;
+ }
+
+ bin_attr_rci2.size = rci2_table_len;
+ bin_attr_rci2.private = rci2_base;
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_rci2);
+ if (ret != 0) {
+ pr_debug("RCI2 table - rci2 sysfs bin file creation failed\n");
+ kobject_del(tables_kobj);
+ kobject_put(tables_kobj);
+ goto err_unmap;
+ }
+
+ return 0;
+
+ err_unmap:
+ memunmap(rci2_base);
+ err:
+ pr_debug("RCI2 table - sysfs initialization failed\n");
+ return ret;
+}
+late_initcall(efi_rci2_sysfs_init);
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
new file mode 100644
index 000000000..73089a24f
--- /dev/null
+++ b/drivers/firmware/efi/reboot.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ * Copyright (c) 2014 Red Hat, Inc., Mark Salter <msalter@redhat.com>
+ */
+#include <linux/efi.h>
+#include <linux/reboot.h>
+
+static void (*orig_pm_power_off)(void);
+
+int efi_reboot_quirk_mode = -1;
+
+void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
+{
+ const char *str[] = { "cold", "warm", "shutdown", "platform" };
+ int efi_mode, cap_reset_mode;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
+ return;
+
+ switch (reboot_mode) {
+ case REBOOT_WARM:
+ case REBOOT_SOFT:
+ efi_mode = EFI_RESET_WARM;
+ break;
+ default:
+ efi_mode = EFI_RESET_COLD;
+ break;
+ }
+
+ /*
+ * If a quirk forced an EFI reset mode, always use that.
+ */
+ if (efi_reboot_quirk_mode != -1)
+ efi_mode = efi_reboot_quirk_mode;
+
+ if (efi_capsule_pending(&cap_reset_mode)) {
+ if (efi_mode != cap_reset_mode)
+ printk(KERN_CRIT "efi: %s reset requested but pending "
+ "capsule update requires %s reset... Performing "
+ "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+ str[cap_reset_mode]);
+ efi_mode = cap_reset_mode;
+ }
+
+ efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
+}
+
+bool __weak efi_poweroff_required(void)
+{
+ return false;
+}
+
+static void efi_power_off(void)
+{
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+ /*
+	 * The above call should not return; if it does, fall back to
+	 * the original power-off method (typically ACPI poweroff).
+ */
+ if (orig_pm_power_off)
+ orig_pm_power_off();
+}
+
+static int __init efi_shutdown_init(void)
+{
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_RESET_SYSTEM))
+ return -ENODEV;
+
+ if (efi_poweroff_required()) {
+ orig_pm_power_off = pm_power_off;
+ pm_power_off = efi_power_off;
+ }
+
+ return 0;
+}
+late_initcall(efi_shutdown_init);
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
new file mode 100644
index 000000000..d28e715d2
--- /dev/null
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ * Adapted from drivers/firmware/efi/arm-runtime.c
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (ret) {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init riscv_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(riscv_enable_runtime_services);
+
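+/*
+ * These two helpers are intended to be called from the
+ * arch_efi_call_virt_setup()/teardown() wrappers (see asm/efi.h) so that
+ * each runtime service call executes with the dedicated EFI page tables.
+ */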
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ switch_mm(current->active_mm, &efi_mm, NULL);
+}
+
+void efi_virtmap_unload(void)
+{
+ switch_mm(&efi_mm, current->active_mm, NULL);
+ preempt_enable();
+}
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
new file mode 100644
index 000000000..ad9ddefc9
--- /dev/null
+++ b/drivers/firmware/efi/runtime-map.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/firmware/efi/runtime-map.c
+ * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+
+#include <asm/setup.h>
+
+struct efi_runtime_map_entry {
+ efi_memory_desc_t md;
+ struct kobject kobj; /* kobject for each entry */
+};
+
+static struct efi_runtime_map_entry **map_entries;
+
+struct map_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
+};
+
+static inline struct map_attribute *to_map_attr(struct attribute *attr)
+{
+ return container_of(attr, struct map_attribute, attr);
+}
+
+static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
+}
+
+#define EFI_RUNTIME_FIELD(var) entry->md.var
+
+#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
+static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
+{ \
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
+}
+
+EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
+EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
+EFI_RUNTIME_U64_ATTR_SHOW(attribute);
+
+static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct efi_runtime_map_entry, kobj);
+}
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct efi_runtime_map_entry *entry = to_map_entry(kobj);
+ struct map_attribute *map_attr = to_map_attr(attr);
+
+ return map_attr->show(entry, buf);
+}
+
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+ &map_type_attr.attr,
+ &map_phys_addr_attr.attr,
+ &map_virt_addr_attr.attr,
+ &map_num_pages_attr.attr,
+ &map_attribute_attr.attr,
+ NULL
+};
+
+static const struct sysfs_ops map_attr_ops = {
+ .show = map_attr_show,
+};
+
+static void map_release(struct kobject *kobj)
+{
+ struct efi_runtime_map_entry *entry;
+
+ entry = to_map_entry(kobj);
+ kfree(entry);
+}
+
+static struct kobj_type __refdata map_ktype = {
+ .sysfs_ops = &map_attr_ops,
+ .default_attrs = def_attrs,
+ .release = map_release,
+};
+
+static struct kset *map_kset;
+
+static struct efi_runtime_map_entry *
+add_sysfs_runtime_map_entry(struct kobject *kobj, int nr,
+ efi_memory_desc_t *md)
+{
+ int ret;
+ struct efi_runtime_map_entry *entry;
+
+ if (!map_kset) {
+ map_kset = kset_create_and_add("runtime-map", NULL, kobj);
+ if (!map_kset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ kset_unregister(map_kset);
+ map_kset = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
+
+ kobject_init(&entry->kobj, &map_ktype);
+ entry->kobj.kset = map_kset;
+ ret = kobject_add(&entry->kobj, NULL, "%d", nr);
+ if (ret) {
+ kobject_put(&entry->kobj);
+ kset_unregister(map_kset);
+ map_kset = NULL;
+ return ERR_PTR(ret);
+ }
+
+ return entry;
+}
+
+int efi_get_runtime_map_size(void)
+{
+ return efi.memmap.nr_map * efi.memmap.desc_size;
+}
+
+int efi_get_runtime_map_desc_size(void)
+{
+ return efi.memmap.desc_size;
+}
+
+int efi_runtime_map_copy(void *buf, size_t bufsz)
+{
+ size_t sz = efi_get_runtime_map_size();
+
+ if (sz > bufsz)
+ sz = bufsz;
+
+ memcpy(buf, efi.memmap.map, sz);
+ return 0;
+}
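+
+/*
+ * The three helpers above are used by the kexec_file_load() path (e.g. the
+ * x86 bzImage loader) to size and copy the runtime memory map that is handed
+ * to the next kernel.
+ */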
+
+int __init efi_runtime_map_init(struct kobject *efi_kobj)
+{
+ int i, j, ret = 0;
+ struct efi_runtime_map_entry *entry;
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+ map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL);
+ if (!map_entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ i = 0;
+ for_each_efi_memory_desc(md) {
+ entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto out_add_entry;
+ }
+ *(map_entries + i++) = entry;
+ }
+
+ return 0;
+out_add_entry:
+ for (j = i - 1; j >= 0; j--) {
+ entry = *(map_entries + j);
+ kobject_put(&entry->kobj);
+ }
+out:
+ return ret;
+}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
new file mode 100644
index 000000000..60075e0e4
--- /dev/null
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * runtime-wrappers.c - Runtime Services function call wrappers
+ *
+ * Implementation summary:
+ * -----------------------
+ * 1. When user/kernel thread requests to execute efi_runtime_service(),
+ * enqueue work to efi_rts_wq.
+ * 2. The caller thread waits until the work is finished, since it depends
+ *    on the return status and side effects of the efi_runtime_service()
+ *    call. For instance, get_variable() and get_next_variable().
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Split off from arch/x86/platform/efi/efi.c
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
+ * Copyright (C) 2005-2008 Intel Co.
+ * Copyright (C) 2013 SuSE Labs
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/bug.h>
+#include <linux/efi.h>
+#include <linux/irqflags.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include <asm/efi.h>
+
+/*
+ * Wrap around the new efi_call_virt_generic() macros so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...) \
+ efi_call_virt_pointer(efi.runtime, f, args)
+#define __efi_call_virt(f, args...) \
+ __efi_call_virt_pointer(efi.runtime, f, args)
+
+struct efi_runtime_work efi_rts_work;
+
+/*
+ * efi_queue_work: Queue efi_runtime_service() and wait until it's done
+ * @rts: efi_runtime_service() function identifier
+ * @rts_arg<1-5>: efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock) and caller waits until the work is
+ * finished, hence _only_ one work is queued at a time and the caller
+ * thread waits for completion.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
+({ \
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
+ pr_warn_once("EFI Runtime Services are disabled!\n"); \
+ efi_rts_work.status = EFI_DEVICE_ERROR; \
+ goto exit; \
+ } \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+ INIT_WORK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+ efi_rts_work.arg4 = _arg4; \
+ efi_rts_work.arg5 = _arg5; \
+ efi_rts_work.efi_rts_id = _rts; \
+ \
+ /* \
+	 * queue_work() returns 0 if the work was already on the	\
+	 * queue; _ideally_ this should never happen.			\
+ */ \
+ if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
+ wait_for_completion(&efi_rts_work.efi_rts_comp); \
+ else \
+ pr_err("Failed to queue work to efi_rts_wq.\n"); \
+ \
+exit: \
+ efi_rts_work.efi_rts_id = EFI_NONE; \
+ efi_rts_work.status; \
+})
+
+#ifndef arch_efi_save_flags
+#define arch_efi_save_flags(state_flags) local_save_flags(state_flags)
+#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags)
+#endif
+
+unsigned long efi_call_virt_save_flags(void)
+{
+ unsigned long flags;
+
+ arch_efi_save_flags(flags);
+ return flags;
+}
+
+void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+ unsigned long cur_flags, mismatch;
+
+ cur_flags = efi_call_virt_save_flags();
+
+ mismatch = flags ^ cur_flags;
+ if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+ return;
+
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+ pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+ flags, cur_flags, call);
+ arch_efi_restore_flags(flags);
+}
+
+/*
+ * According to section 7.1 of the UEFI spec, Runtime Services are not fully
+ * reentrant, and there are particular combinations of calls that need to be
+ * serialized. (source: UEFI Specification v2.4A)
+ *
+ * Table 31. Rules for Reentry Into Runtime Services
+ * +------------------------------------+-------------------------------+
+ * | If previous call is busy in | Forbidden to call |
+ * +------------------------------------+-------------------------------+
+ * | Any | SetVirtualAddressMap() |
+ * +------------------------------------+-------------------------------+
+ * | ConvertPointer() | ConvertPointer() |
+ * +------------------------------------+-------------------------------+
+ * | SetVariable() | ResetSystem() |
+ * | UpdateCapsule() | |
+ * | SetTime() | |
+ * | SetWakeupTime() | |
+ * | GetNextHighMonotonicCount() | |
+ * +------------------------------------+-------------------------------+
+ * | GetVariable() | GetVariable() |
+ * | GetNextVariableName() | GetNextVariableName() |
+ * | SetVariable() | SetVariable() |
+ * | QueryVariableInfo() | QueryVariableInfo() |
+ * | UpdateCapsule() | UpdateCapsule() |
+ * | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() |
+ * | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() |
+ * +------------------------------------+-------------------------------+
+ * | GetTime() | GetTime() |
+ * | SetTime() | SetTime() |
+ * | GetWakeupTime() | GetWakeupTime() |
+ * | SetWakeupTime() | SetWakeupTime() |
+ * +------------------------------------+-------------------------------+
+ *
+ * Because the EFI pstore may write to the variable store in
+ * interrupt context, we need to use a lock for at least the groups that
+ * contain SetVariable() and QueryVariableInfo(). That leaves little else, as
+ * none of the remaining functions are actually ever called at runtime.
+ * So let's just use a single lock to serialize all Runtime Services calls.
+ */
+static DEFINE_SEMAPHORE(efi_runtime_lock);
+
+/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Semantics followed by efi_call_rts() when unpacking efi_runtime_work:
+ * 1. If the argument was a pointer, cast it from void pointer back to its
+ *    original pointer type.
+ * 2. If the argument was a value, cast it from void pointer to its original
+ *    pointer type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+ void *arg1, *arg2, *arg3, *arg4, *arg5;
+ efi_status_t status = EFI_NOT_FOUND;
+
+ arg1 = efi_rts_work.arg1;
+ arg2 = efi_rts_work.arg2;
+ arg3 = efi_rts_work.arg3;
+ arg4 = efi_rts_work.arg4;
+ arg5 = efi_rts_work.arg5;
+
+ switch (efi_rts_work.efi_rts_id) {
+ case EFI_GET_TIME:
+ status = efi_call_virt(get_time, (efi_time_t *)arg1,
+ (efi_time_cap_t *)arg2);
+ break;
+ case EFI_SET_TIME:
+ status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ break;
+ case EFI_GET_WAKEUP_TIME:
+ status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+ (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ break;
+ case EFI_SET_WAKEUP_TIME:
+ status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+ (efi_time_t *)arg2);
+ break;
+ case EFI_GET_VARIABLE:
+ status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, (u32 *)arg3,
+ (unsigned long *)arg4, (void *)arg5);
+ break;
+ case EFI_GET_NEXT_VARIABLE:
+ status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+ (efi_char16_t *)arg2,
+ (efi_guid_t *)arg3);
+ break;
+ case EFI_SET_VARIABLE:
+ status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+ (efi_guid_t *)arg2, *(u32 *)arg3,
+ *(unsigned long *)arg4, (void *)arg5);
+ break;
+ case EFI_QUERY_VARIABLE_INFO:
+ status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+ (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ break;
+ case EFI_GET_NEXT_HIGH_MONO_COUNT:
+ status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ break;
+ case EFI_UPDATE_CAPSULE:
+ status = efi_call_virt(update_capsule,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2,
+ *(unsigned long *)arg3);
+ break;
+ case EFI_QUERY_CAPSULE_CAPS:
+ status = efi_call_virt(query_capsule_caps,
+ (efi_capsule_header_t **)arg1,
+ *(unsigned long *)arg2, (u64 *)arg3,
+ (int *)arg4);
+ break;
+ default:
+ /*
+ * Ideally, we should never reach here because a caller of this
+ * function should have put the right efi_runtime_service()
+		 * function identifier into efi_rts_work.efi_rts_id
+ */
+ pr_err("Requested executing invalid EFI Runtime Service.\n");
+ }
+ efi_rts_work.status = status;
+ complete(&efi_rts_work.efi_rts_comp);
+}
+
+static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_time(efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
+ efi_bool_t *pending,
+ efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 *attr,
+ unsigned long *data_size,
+ void *data)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
+ data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
+ NULL, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_set_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 attr,
+ unsigned long data_size,
+ void *data)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
+ data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
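+/*
+ * The _nonblocking variants below call the firmware directly instead of
+ * going through efi_rts_wq, and only try-lock the runtime lock, so they can
+ * be used from contexts that must not sleep (e.g. the efi-pstore write path).
+ */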
+static efi_status_t
+virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+{
+ efi_status_t status;
+
+ if (down_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt(set_variable, name, vendor, attr, data_size,
+ data);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+
+static efi_status_t virt_efi_query_variable_info(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
+ remaining_space, max_variable_size, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t
+virt_efi_query_variable_info_nonblocking(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt(query_variable_info, attr, storage_space,
+ remaining_space, max_variable_size);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ NULL, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
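+/*
+ * reset_system() is likewise invoked directly rather than via efi_rts_wq,
+ * since it may be called late in reboot or shutdown when queueing work and
+ * sleeping are no longer possible.
+ */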
+static void virt_efi_reset_system(int reset_type,
+ efi_status_t status,
+ unsigned long data_size,
+ efi_char16_t *data)
+{
+ if (down_trylock(&efi_runtime_lock)) {
+ pr_warn("failed to invoke the reset_system() runtime service:\n"
+ "could not get exclusive access to the firmware\n");
+ return;
+ }
+ efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
+ __efi_call_virt(reset_system, reset_type, status, data_size, data);
+ up(&efi_runtime_lock);
+}
+
+static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count,
+ unsigned long sg_list)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
+ NULL, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
+ max_size, reset_type, NULL);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+void efi_native_runtime_setup(void)
+{
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+ efi.set_wakeup_time = virt_efi_set_wakeup_time;
+ efi.get_variable = virt_efi_get_variable;
+ efi.get_next_variable = virt_efi_get_next_variable;
+ efi.set_variable = virt_efi_set_variable;
+ efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking;
+ efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+ efi.reset_system = virt_efi_reset_system;
+ efi.query_variable_info = virt_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
+ efi.update_capsule = virt_efi_update_capsule;
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
+}
diff --git a/drivers/firmware/efi/test/Makefile b/drivers/firmware/efi/test/Makefile
new file mode 100644
index 000000000..419708855
--- /dev/null
+++ b/drivers/firmware/efi/test/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_EFI_TEST) += efi_test.o
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
new file mode 100644
index 000000000..ddf9eae39
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EFI Test Driver for Runtime Services
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ * This driver exports EFI runtime services interfaces into userspace, which
+ * allow userspace to use and test the UEFI runtime services provided by
+ * firmware.
+ *
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "efi_test.h"
+
+MODULE_AUTHOR("Ivan Hu <ivan.hu@canonical.com>");
+MODULE_DESCRIPTION("EFI Test Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Note this function returns the number of *bytes*, not the number of
+ * ucs2 characters.
+ */
+static inline size_t user_ucs2_strsize(efi_char16_t __user *str)
+{
+ efi_char16_t *s = str, c;
+ size_t len;
+
+ if (!str)
+ return 0;
+
+ /* Include terminating NULL */
+ len = sizeof(efi_char16_t);
+
+ if (get_user(c, s++)) {
+ /* Can't read userspace memory for size */
+ return 0;
+ }
+
+ while (c != 0) {
+ if (get_user(c, s++)) {
+ /* Can't read userspace memory for size */
+ return 0;
+ }
+ len += sizeof(efi_char16_t);
+ }
+ return len;
+}
+
+/*
+ * Allocate a buffer and copy a ucs2 string from user space into it.
+ */
+static inline int
+copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
+ size_t len)
+{
+ efi_char16_t *buf;
+
+ if (!src) {
+ *dst = NULL;
+ return 0;
+ }
+
+ buf = memdup_user(src, len);
+ if (IS_ERR(buf)) {
+ *dst = NULL;
+ return PTR_ERR(buf);
+ }
+ *dst = buf;
+
+ return 0;
+}
+
+/*
+ * Count the bytes in 'str', including the terminating NULL.
+ *
+ * Just a wrapper around user_ucs2_strsize().
+ */
+static inline int
+get_ucs2_strsize_from_user(efi_char16_t __user *src, size_t *len)
+{
+ *len = user_ucs2_strsize(src);
+ if (*len == 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Calculate the required buffer allocation size and copy a ucs2 string
+ * from user space into it.
+ *
+ * This function differs from copy_ucs2_from_user_len() because it
+ * calculates the size of the buffer to allocate by taking the length of
+ * the string 'src'.
+ *
+ * If a non-zero value is returned, the caller MUST NOT access 'dst'.
+ *
+ * It is the caller's responsibility to free 'dst'.
+ */
+static inline int
+copy_ucs2_from_user(efi_char16_t **dst, efi_char16_t __user *src)
+{
+ size_t len;
+
+ len = user_ucs2_strsize(src);
+ if (len == 0)
+ return -EFAULT;
+ return copy_ucs2_from_user_len(dst, src, len);
+}
+
+/*
+ * Copy a ucs2 string to a user buffer.
+ *
+ * This function is a simple wrapper around copy_to_user() that does
+ * nothing if 'src' is NULL, which is useful for reducing the amount of
+ * NULL checking the caller has to do.
+ *
+ * 'len' specifies the number of bytes to copy.
+ */
+static inline int
+copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_char16_t *src, size_t len)
+{
+ if (!src)
+ return 0;
+
+ return copy_to_user(dst, src, len);
+}
+
+static long efi_runtime_get_variable(unsigned long arg)
+{
+ struct efi_getvariable __user *getvariable_user;
+ struct efi_getvariable getvariable;
+ unsigned long datasize = 0, prev_datasize, *dz;
+ efi_guid_t vendor_guid, *vd = NULL;
+ efi_status_t status;
+ efi_char16_t *name = NULL;
+ u32 attr, *at;
+ void *data = NULL;
+ int rv = 0;
+
+ getvariable_user = (struct efi_getvariable __user *)arg;
+
+ if (copy_from_user(&getvariable, getvariable_user,
+ sizeof(getvariable)))
+ return -EFAULT;
+ if (getvariable.data_size &&
+ get_user(datasize, getvariable.data_size))
+ return -EFAULT;
+ if (getvariable.vendor_guid) {
+ if (copy_from_user(&vendor_guid, getvariable.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+ vd = &vendor_guid;
+ }
+
+ if (getvariable.variable_name) {
+ rv = copy_ucs2_from_user(&name, getvariable.variable_name);
+ if (rv)
+ return rv;
+ }
+
+ at = getvariable.attributes ? &attr : NULL;
+ dz = getvariable.data_size ? &datasize : NULL;
+
+ if (getvariable.data_size && getvariable.data) {
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (!data) {
+ kfree(name);
+ return -ENOMEM;
+ }
+ }
+
+ prev_datasize = datasize;
+ status = efi.get_variable(name, vd, at, dz, data);
+ kfree(name);
+
+ if (put_user(status, getvariable.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ if (dz && put_user(datasize, getvariable.data_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (prev_datasize < datasize) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (data) {
+ if (copy_to_user(getvariable.data, data, datasize)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (at && put_user(attr, getvariable.attributes)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (dz && put_user(datasize, getvariable.data_size))
+ rv = -EFAULT;
+
+out:
+ kfree(data);
+ return rv;
+
+}
+
+static long efi_runtime_set_variable(unsigned long arg)
+{
+ struct efi_setvariable __user *setvariable_user;
+ struct efi_setvariable setvariable;
+ efi_guid_t vendor_guid;
+ efi_status_t status;
+ efi_char16_t *name = NULL;
+ void *data;
+ int rv = 0;
+
+ setvariable_user = (struct efi_setvariable __user *)arg;
+
+ if (copy_from_user(&setvariable, setvariable_user, sizeof(setvariable)))
+ return -EFAULT;
+ if (copy_from_user(&vendor_guid, setvariable.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+
+ if (setvariable.variable_name) {
+ rv = copy_ucs2_from_user(&name, setvariable.variable_name);
+ if (rv)
+ return rv;
+ }
+
+ data = memdup_user(setvariable.data, setvariable.data_size);
+ if (IS_ERR(data)) {
+ kfree(name);
+ return PTR_ERR(data);
+ }
+
+ status = efi.set_variable(name, &vendor_guid,
+ setvariable.attributes,
+ setvariable.data_size, data);
+
+ if (put_user(status, setvariable.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ rv = status == EFI_SUCCESS ? 0 : -EINVAL;
+
+out:
+ kfree(data);
+ kfree(name);
+
+ return rv;
+}
+
+static long efi_runtime_get_time(unsigned long arg)
+{
+ struct efi_gettime __user *gettime_user;
+ struct efi_gettime gettime;
+ efi_status_t status;
+ efi_time_cap_t cap;
+ efi_time_t efi_time;
+
+ gettime_user = (struct efi_gettime __user *)arg;
+ if (copy_from_user(&gettime, gettime_user, sizeof(gettime)))
+ return -EFAULT;
+
+ status = efi.get_time(gettime.time ? &efi_time : NULL,
+ gettime.capabilities ? &cap : NULL);
+
+ if (put_user(status, gettime.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (gettime.capabilities) {
+ efi_time_cap_t __user *cap_local;
+
+ cap_local = (efi_time_cap_t *)gettime.capabilities;
+ if (put_user(cap.resolution, &(cap_local->resolution)) ||
+ put_user(cap.accuracy, &(cap_local->accuracy)) ||
+ put_user(cap.sets_to_zero, &(cap_local->sets_to_zero)))
+ return -EFAULT;
+ }
+ if (gettime.time) {
+ if (copy_to_user(gettime.time, &efi_time, sizeof(efi_time_t)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long efi_runtime_set_time(unsigned long arg)
+{
+ struct efi_settime __user *settime_user;
+ struct efi_settime settime;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ settime_user = (struct efi_settime __user *)arg;
+ if (copy_from_user(&settime, settime_user, sizeof(settime)))
+ return -EFAULT;
+ if (copy_from_user(&efi_time, settime.time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+ status = efi.set_time(&efi_time);
+
+ if (put_user(status, settime.status))
+ return -EFAULT;
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_waketime(unsigned long arg)
+{
+ struct efi_getwakeuptime __user *getwakeuptime_user;
+ struct efi_getwakeuptime getwakeuptime;
+ efi_bool_t enabled, pending;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ getwakeuptime_user = (struct efi_getwakeuptime __user *)arg;
+ if (copy_from_user(&getwakeuptime, getwakeuptime_user,
+ sizeof(getwakeuptime)))
+ return -EFAULT;
+
+ status = efi.get_wakeup_time(
+ getwakeuptime.enabled ? (efi_bool_t *)&enabled : NULL,
+ getwakeuptime.pending ? (efi_bool_t *)&pending : NULL,
+ getwakeuptime.time ? &efi_time : NULL);
+
+ if (put_user(status, getwakeuptime.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (getwakeuptime.enabled && put_user(enabled,
+ getwakeuptime.enabled))
+ return -EFAULT;
+
+ if (getwakeuptime.time) {
+ if (copy_to_user(getwakeuptime.time, &efi_time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long efi_runtime_set_waketime(unsigned long arg)
+{
+ struct efi_setwakeuptime __user *setwakeuptime_user;
+ struct efi_setwakeuptime setwakeuptime;
+ efi_bool_t enabled;
+ efi_status_t status;
+ efi_time_t efi_time;
+
+ setwakeuptime_user = (struct efi_setwakeuptime __user *)arg;
+
+ if (copy_from_user(&setwakeuptime, setwakeuptime_user,
+ sizeof(setwakeuptime)))
+ return -EFAULT;
+
+ enabled = setwakeuptime.enabled;
+ if (setwakeuptime.time) {
+ if (copy_from_user(&efi_time, setwakeuptime.time,
+ sizeof(efi_time_t)))
+ return -EFAULT;
+
+ status = efi.set_wakeup_time(enabled, &efi_time);
+ } else
+ status = efi.set_wakeup_time(enabled, NULL);
+
+ if (put_user(status, setwakeuptime.status))
+ return -EFAULT;
+
+ return status == EFI_SUCCESS ? 0 : -EINVAL;
+}
+
+static long efi_runtime_get_nextvariablename(unsigned long arg)
+{
+ struct efi_getnextvariablename __user *getnextvariablename_user;
+ struct efi_getnextvariablename getnextvariablename;
+ unsigned long name_size, prev_name_size = 0, *ns = NULL;
+ efi_status_t status;
+ efi_guid_t *vd = NULL;
+ efi_guid_t vendor_guid;
+ efi_char16_t *name = NULL;
+ int rv = 0;
+
+ getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
+
+ if (copy_from_user(&getnextvariablename, getnextvariablename_user,
+ sizeof(getnextvariablename)))
+ return -EFAULT;
+
+ if (getnextvariablename.variable_name_size) {
+ if (get_user(name_size, getnextvariablename.variable_name_size))
+ return -EFAULT;
+ ns = &name_size;
+ prev_name_size = name_size;
+ }
+
+ if (getnextvariablename.vendor_guid) {
+ if (copy_from_user(&vendor_guid,
+ getnextvariablename.vendor_guid,
+ sizeof(vendor_guid)))
+ return -EFAULT;
+ vd = &vendor_guid;
+ }
+
+ if (getnextvariablename.variable_name) {
+ size_t name_string_size = 0;
+
+ rv = get_ucs2_strsize_from_user(
+ getnextvariablename.variable_name,
+ &name_string_size);
+ if (rv)
+ return rv;
+ /*
+ * In some use cases, name_size may be smaller than the real size of the
+ * buffer holding the variable name. The most typical case is passing 0
+ * on the first call to obtain the required buffer size. So copy at
+ * least the string size of the variable name from user space, or else
+ * the name passed to UEFI may not be terminated as expected.
+ */
+ rv = copy_ucs2_from_user_len(&name,
+ getnextvariablename.variable_name,
+ prev_name_size > name_string_size ?
+ prev_name_size : name_string_size);
+ if (rv)
+ return rv;
+ }
+
+ status = efi.get_next_variable(ns, name, vd);
+
+ if (put_user(status, getnextvariablename.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ if (ns && put_user(*ns,
+ getnextvariablename.variable_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (name) {
+ if (copy_ucs2_to_user_len(getnextvariablename.variable_name,
+ name, prev_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ns) {
+ if (put_user(*ns, getnextvariablename.variable_name_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (vd) {
+ if (copy_to_user(getnextvariablename.vendor_guid, vd,
+ sizeof(efi_guid_t)))
+ rv = -EFAULT;
+ }
+
+out:
+ kfree(name);
+ return rv;
+}
+
+static long efi_runtime_get_nexthighmonocount(unsigned long arg)
+{
+ struct efi_getnexthighmonotoniccount __user *getnexthighmonocount_user;
+ struct efi_getnexthighmonotoniccount getnexthighmonocount;
+ efi_status_t status;
+ u32 count;
+
+ getnexthighmonocount_user = (struct
+ efi_getnexthighmonotoniccount __user *)arg;
+
+ if (copy_from_user(&getnexthighmonocount,
+ getnexthighmonocount_user,
+ sizeof(getnexthighmonocount)))
+ return -EFAULT;
+
+ status = efi.get_next_high_mono_count(
+ getnexthighmonocount.high_count ? &count : NULL);
+
+ if (put_user(status, getnexthighmonocount.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (getnexthighmonocount.high_count &&
+ put_user(count, getnexthighmonocount.high_count))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long efi_runtime_reset_system(unsigned long arg)
+{
+ struct efi_resetsystem __user *resetsystem_user;
+ struct efi_resetsystem resetsystem;
+ void *data = NULL;
+
+ resetsystem_user = (struct efi_resetsystem __user *)arg;
+ if (copy_from_user(&resetsystem, resetsystem_user,
+ sizeof(resetsystem)))
+ return -EFAULT;
+ if (resetsystem.data_size != 0) {
+ data = memdup_user((void *)resetsystem.data,
+ resetsystem.data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ efi.reset_system(resetsystem.reset_type, resetsystem.status,
+ resetsystem.data_size, (efi_char16_t *)data);
+
+ kfree(data);
+ return 0;
+}
+
+static long efi_runtime_query_variableinfo(unsigned long arg)
+{
+ struct efi_queryvariableinfo __user *queryvariableinfo_user;
+ struct efi_queryvariableinfo queryvariableinfo;
+ efi_status_t status;
+ u64 max_storage, remaining, max_size;
+
+ queryvariableinfo_user = (struct efi_queryvariableinfo __user *)arg;
+
+ if (copy_from_user(&queryvariableinfo, queryvariableinfo_user,
+ sizeof(queryvariableinfo)))
+ return -EFAULT;
+
+ status = efi.query_variable_info(queryvariableinfo.attributes,
+ &max_storage, &remaining, &max_size);
+
+ if (put_user(status, queryvariableinfo.status))
+ return -EFAULT;
+
+ if (status != EFI_SUCCESS)
+ return -EINVAL;
+
+ if (put_user(max_storage,
+ queryvariableinfo.maximum_variable_storage_size))
+ return -EFAULT;
+
+ if (put_user(remaining,
+ queryvariableinfo.remaining_variable_storage_size))
+ return -EFAULT;
+
+ if (put_user(max_size, queryvariableinfo.maximum_variable_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long efi_runtime_query_capsulecaps(unsigned long arg)
+{
+ struct efi_querycapsulecapabilities __user *qcaps_user;
+ struct efi_querycapsulecapabilities qcaps;
+ efi_capsule_header_t *capsules;
+ efi_status_t status;
+ u64 max_size;
+ int i, reset_type;
+ int rv = 0;
+
+ qcaps_user = (struct efi_querycapsulecapabilities __user *)arg;
+
+ if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
+ return -EFAULT;
+
+ if (qcaps.capsule_count == ULONG_MAX)
+ return -EINVAL;
+
+ capsules = kcalloc(qcaps.capsule_count + 1,
+ sizeof(efi_capsule_header_t), GFP_KERNEL);
+ if (!capsules)
+ return -ENOMEM;
+
+ for (i = 0; i < qcaps.capsule_count; i++) {
+ efi_capsule_header_t *c;
+ /*
+ * We cannot dereference qcaps.capsule_header_array directly to
+ * obtain the address of the capsule as it resides in
+ * user space.
+ */
+ if (get_user(c, qcaps.capsule_header_array + i)) {
+ rv = -EFAULT;
+ goto out;
+ }
+ if (copy_from_user(&capsules[i], c,
+ sizeof(efi_capsule_header_t))) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+ qcaps.capsule_header_array = &capsules;
+
+ status = efi.query_capsule_caps((efi_capsule_header_t **)
+ qcaps.capsule_header_array,
+ qcaps.capsule_count,
+ &max_size, &reset_type);
+
+ if (put_user(status, qcaps.status)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (status != EFI_SUCCESS) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (put_user(max_size, qcaps.maximum_capsule_size)) {
+ rv = -EFAULT;
+ goto out;
+ }
+
+ if (put_user(reset_type, qcaps.reset_type))
+ rv = -EFAULT;
+
+out:
+ kfree(capsules);
+ return rv;
+}
+
+static long efi_test_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case EFI_RUNTIME_GET_VARIABLE:
+ return efi_runtime_get_variable(arg);
+
+ case EFI_RUNTIME_SET_VARIABLE:
+ return efi_runtime_set_variable(arg);
+
+ case EFI_RUNTIME_GET_TIME:
+ return efi_runtime_get_time(arg);
+
+ case EFI_RUNTIME_SET_TIME:
+ return efi_runtime_set_time(arg);
+
+ case EFI_RUNTIME_GET_WAKETIME:
+ return efi_runtime_get_waketime(arg);
+
+ case EFI_RUNTIME_SET_WAKETIME:
+ return efi_runtime_set_waketime(arg);
+
+ case EFI_RUNTIME_GET_NEXTVARIABLENAME:
+ return efi_runtime_get_nextvariablename(arg);
+
+ case EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT:
+ return efi_runtime_get_nexthighmonocount(arg);
+
+ case EFI_RUNTIME_QUERY_VARIABLEINFO:
+ return efi_runtime_query_variableinfo(arg);
+
+ case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES:
+ return efi_runtime_query_capsulecaps(arg);
+
+ case EFI_RUNTIME_RESET_SYSTEM:
+ return efi_runtime_reset_system(arg);
+ }
+
+ return -ENOTTY;
+}
+
+static int efi_test_open(struct inode *inode, struct file *file)
+{
+ int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+ if (ret)
+ return ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ /*
+ * Nothing special to do here. We accept multiple open files at the
+ * same time because we synchronize on a per-call basis.
+ */
+ return 0;
+}
+
+static int efi_test_close(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/*
+ * The various file operations we support.
+ */
+static const struct file_operations efi_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = efi_test_ioctl,
+ .open = efi_test_open,
+ .release = efi_test_close,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_test_dev = {
+ MISC_DYNAMIC_MINOR,
+ "efi_test",
+ &efi_test_fops
+};
+
+static int __init efi_test_init(void)
+{
+ int ret;
+
+ ret = misc_register(&efi_test_dev);
+ if (ret) {
+ pr_err("efi_test: can't misc_register on minor=%d\n",
+ MISC_DYNAMIC_MINOR);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit efi_test_exit(void)
+{
+ misc_deregister(&efi_test_dev);
+}
+
+module_init(efi_test_init);
+module_exit(efi_test_exit);
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
new file mode 100644
index 000000000..f2446aa1c
--- /dev/null
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * EFI Test Driver Header
+ *
+ * Copyright(C) 2012-2016 Canonical Ltd.
+ *
+ */
+
+#ifndef _DRIVERS_FIRMWARE_EFI_TEST_H_
+#define _DRIVERS_FIRMWARE_EFI_TEST_H_
+
+#include <linux/efi.h>
+
+struct efi_getvariable {
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ u32 *attributes;
+ unsigned long *data_size;
+ void *data;
+ efi_status_t *status;
+} __packed;
+
+struct efi_setvariable {
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ u32 attributes;
+ unsigned long data_size;
+ void *data;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getnextvariablename {
+ unsigned long *variable_name_size;
+ efi_char16_t *variable_name;
+ efi_guid_t *vendor_guid;
+ efi_status_t *status;
+} __packed;
+
+struct efi_queryvariableinfo {
+ u32 attributes;
+ u64 *maximum_variable_storage_size;
+ u64 *remaining_variable_storage_size;
+ u64 *maximum_variable_size;
+ efi_status_t *status;
+} __packed;
+
+struct efi_gettime {
+ efi_time_t *time;
+ efi_time_cap_t *capabilities;
+ efi_status_t *status;
+} __packed;
+
+struct efi_settime {
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getwakeuptime {
+ efi_bool_t *enabled;
+ efi_bool_t *pending;
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_setwakeuptime {
+ efi_bool_t enabled;
+ efi_time_t *time;
+ efi_status_t *status;
+} __packed;
+
+struct efi_getnexthighmonotoniccount {
+ u32 *high_count;
+ efi_status_t *status;
+} __packed;
+
+struct efi_querycapsulecapabilities {
+ efi_capsule_header_t **capsule_header_array;
+ unsigned long capsule_count;
+ u64 *maximum_capsule_size;
+ int *reset_type;
+ efi_status_t *status;
+} __packed;
+
+struct efi_resetsystem {
+ int reset_type;
+ efi_status_t status;
+ unsigned long data_size;
+ efi_char16_t *data;
+} __packed;
+
+#define EFI_RUNTIME_GET_VARIABLE \
+ _IOWR('p', 0x01, struct efi_getvariable)
+#define EFI_RUNTIME_SET_VARIABLE \
+ _IOW('p', 0x02, struct efi_setvariable)
+
+#define EFI_RUNTIME_GET_TIME \
+ _IOR('p', 0x03, struct efi_gettime)
+#define EFI_RUNTIME_SET_TIME \
+ _IOW('p', 0x04, struct efi_settime)
+
+#define EFI_RUNTIME_GET_WAKETIME \
+ _IOR('p', 0x05, struct efi_getwakeuptime)
+#define EFI_RUNTIME_SET_WAKETIME \
+ _IOW('p', 0x06, struct efi_setwakeuptime)
+
+#define EFI_RUNTIME_GET_NEXTVARIABLENAME \
+ _IOWR('p', 0x07, struct efi_getnextvariablename)
+
+#define EFI_RUNTIME_QUERY_VARIABLEINFO \
+ _IOR('p', 0x08, struct efi_queryvariableinfo)
+
+#define EFI_RUNTIME_GET_NEXTHIGHMONOTONICCOUNT \
+ _IOR('p', 0x09, struct efi_getnexthighmonotoniccount)
+
+#define EFI_RUNTIME_QUERY_CAPSULECAPABILITIES \
+ _IOR('p', 0x0A, struct efi_querycapsulecapabilities)
+
+#define EFI_RUNTIME_RESET_SYSTEM \
+ _IOW('p', 0x0B, struct efi_resetsystem)
+
+#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
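
The header above is the whole userspace contract: open the /dev/efi_test misc device registered by efi_test.c and issue the ioctls defined here. Below is a minimal, illustrative caller sketch, assuming the efi_test.h definitions and their <linux/efi.h> types have been made available to the userspace build (real consumers such as fwts ship their own copies); it is a sketch, not part of the patch itself.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "efi_test.h"			/* assumed importable from userspace */

int main(void)
{
	efi_time_t tm;
	efi_status_t status;
	struct efi_gettime req = {
		.time		= &tm,
		.capabilities	= NULL,	/* optional output, skipped here */
		.status		= &status,
	};
	int fd = open("/dev/efi_test", O_RDWR);	/* requires CAP_SYS_ADMIN */

	if (fd < 0 || ioctl(fd, EFI_RUNTIME_GET_TIME, &req) < 0) {
		perror("efi_test");
		return 1;
	}
	printf("EFI time: %04u-%02u-%02u %02u:%02u:%02u\n",
	       tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	close(fd);
	return 0;
}

The get/set/query ioctls return 0 only when the wrapped runtime service reported EFI_SUCCESS; the raw EFI status is still written back through the status pointer, so callers can distinguish firmware errors from -EFAULT or -EINVAL.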
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 000000000..e8d69bd54
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ */
+
+#define TPM_MEMREMAP(start, size) early_memremap(start, size)
+#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
+
+#include <asm/early_ioremap.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/tpm_eventlog.h>
+
+int efi_tpm_final_log_size;
+EXPORT_SYMBOL(efi_tpm_final_log_size);
+
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
+{
+ struct tcg_pcr_event2_head *header;
+ int event_size, size = 0;
+
+ while (count > 0) {
+ header = data + size;
+ event_size = __calc_tpm2_event_size(header, size_info, true);
+ if (event_size == 0)
+ return -1;
+ size += event_size;
+ count--;
+ }
+
+ return size;
+}
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct efi_tcg2_final_events_table *final_tbl;
+ int tbl_size;
+ int ret = 0;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
+ /*
+ * We can't calculate the size of the final events without the
+ * first entry in the TPM log, so bail here.
+ */
+ return 0;
+ }
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+
+ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+ pr_info("TPM Final Events table not present\n");
+ goto out;
+ } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+ pr_warn(FW_BUG "TPM Final Events table invalid\n");
+ goto out;
+ }
+
+ final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+ if (!final_tbl) {
+ pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
+ efi.tpm_final_log);
+ efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tbl_size = 0;
+ if (final_tbl->nr_events != 0) {
+ void *events = (void *)efi.tpm_final_log
+ + sizeof(final_tbl->version)
+ + sizeof(final_tbl->nr_events);
+
+ tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
+ }
+
+ if (tbl_size < 0) {
+ pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+ ret = -EINVAL;
+ goto out_calc;
+ }
+
+ memblock_reserve(efi.tpm_final_log,
+ tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = tbl_size;
+
+out_calc:
+ early_memunmap(final_tbl, sizeof(*final_tbl));
+out:
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return ret;
+}
+
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
new file mode 100644
index 000000000..cae590bd0
--- /dev/null
+++ b/drivers/firmware/efi/vars.c
@@ -0,0 +1,1222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Originally from efivars.c
+ *
+ * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
+ * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
+ */
+
+#include <linux/capability.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/ucs2_string.h>
+
+/* Private pointer to registered efivars */
+static struct efivars *__efivars;
+
+/*
+ * efivars_lock protects three things:
+ * 1) efivarfs_list and efivars_sysfs_list
+ * 2) ->ops calls
+ * 3) (un)registration of __efivars
+ */
+static DEFINE_SEMAPHORE(efivars_lock);
+
+static bool
+validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ struct efi_generic_dev_path *node;
+ int offset = 0;
+
+ node = (struct efi_generic_dev_path *)buffer;
+
+ if (len < sizeof(*node))
+ return false;
+
+ while (offset <= len - sizeof(*node) &&
+ node->length >= sizeof(*node) &&
+ node->length <= len - offset) {
+ offset += node->length;
+
+ if ((node->type == EFI_DEV_END_PATH ||
+ node->type == EFI_DEV_END_PATH2) &&
+ node->sub_type == EFI_DEV_END_ENTIRE)
+ return true;
+
+ node = (struct efi_generic_dev_path *)(buffer + offset);
+ }
+
+ /*
+ * If we're here then either node->length pointed past the end
+ * of the buffer or we reached the end of the buffer without
+ * finding a device path end node.
+ */
+ return false;
+}
+
+static bool
+validate_boot_order(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* An array of 16-bit integers */
+ if ((len % 2) != 0)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_load_option(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ u16 filepathlength;
+ int i, desclength = 0, namelen;
+
+ namelen = ucs2_strnlen(var_name, EFI_VAR_NAME_LEN);
+
+ /* Either "Boot" or "Driver" followed by four digits of hex */
+ for (i = match; i < match+4; i++) {
+ if (var_name[i] > 127 ||
+ hex_to_bin(var_name[i] & 0xff) < 0)
+ return true;
+ }
+
+ /* Reject it if there are four hex digits followed by further content */
+ if (namelen > match + 4)
+ return false;
+
+ /* A valid entry must be at least 8 bytes */
+ if (len < 8)
+ return false;
+
+ filepathlength = buffer[4] | buffer[5] << 8;
+
+ /*
+ * There's no stored length for the description, so it has to be
+ * found by hand
+ */
+ desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+ /* Each boot entry must have a descriptor */
+ if (!desclength)
+ return false;
+
+ /*
+ * If the sum of the length of the description, the claimed filepath
+ * length and the original header are greater than the length of the
+ * variable, it's malformed
+ */
+ if ((desclength + filepathlength + 6) > len)
+ return false;
+
+ /*
+ * And, finally, check the filepath
+ */
+ return validate_device_path(var_name, match, buffer + desclength + 6,
+ filepathlength);
+}
+
+static bool
+validate_uint16(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* A single 16-bit integer */
+ if (len != 2)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ unsigned long len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (buffer[i] > 127)
+ return false;
+
+ if (buffer[i] == 0)
+ return true;
+ }
+
+ return false;
+}
+
+struct variable_validate {
+ efi_guid_t vendor;
+ char *name;
+ bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ unsigned long len);
+};
+
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine. If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
+static const struct variable_validate variable_validate[] = {
+ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+ { LINUX_EFI_CRASH_GUID, "*", NULL },
+ { NULL_GUID, "", NULL },
+};
+
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters in @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+{
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
+ return true;
+
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
+
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
+ return false;
+ }
+ }
+}
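+
+/*
+ * For example, matching the name "Boot0001" against the pattern "Boot*"
+ * returns true with *match == 4 (the length of the literal "Boot"
+ * prefix), which is how validate_load_option() knows where the four hex
+ * digits are expected to start.
+ */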
+
+bool
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size)
+{
+ int i;
+ unsigned long utf8_size;
+ u8 *utf8_name;
+
+ utf8_size = ucs2_utf8size(var_name);
+ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+ if (!utf8_name)
+ return false;
+
+ ucs2_as_utf8(utf8_name, var_name, utf8_size);
+ utf8_name[utf8_size] = '\0';
+
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ const char *name = variable_validate[i].name;
+ int match = 0;
+
+ if (efi_guidcmp(vendor, variable_validate[i].vendor))
+ continue;
+
+ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+ if (variable_validate[i].validate == NULL)
+ break;
+ kfree(utf8_name);
+ return variable_validate[i].validate(var_name, match,
+ data, data_size);
+ }
+ }
+ kfree(utf8_name);
+ return true;
+}
+EXPORT_SYMBOL_GPL(efivar_validate);
+
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+ size_t len)
+{
+ int i;
+ bool found = false;
+ int match = 0;
+
+ /*
+ * Check if our variable is in the validated variables list
+ */
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ if (efi_guidcmp(variable_validate[i].vendor, vendor))
+ continue;
+
+ if (variable_matches(var_name, len,
+ variable_validate[i].name, &match)) {
+ found = true;
+ break;
+ }
+ }
+
+ /*
+ * If it's in our list, it is removable.
+ */
+ return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
+static efi_status_t
+check_var_size(u32 attributes, unsigned long size)
+{
+ const struct efivar_operations *fops;
+
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+ return fops->query_variable_store(attributes, size, false);
+}
+
+static efi_status_t
+check_var_size_nonblocking(u32 attributes, unsigned long size)
+{
+ const struct efivar_operations *fops;
+
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+ return fops->query_variable_store(attributes, size, true);
+}
+
+static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
+ struct list_head *head)
+{
+ struct efivar_entry *entry, *n;
+ unsigned long strsize1, strsize2;
+ bool found = false;
+
+ strsize1 = ucs2_strsize(variable_name, 1024);
+ list_for_each_entry_safe(entry, n, head, list) {
+ strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(variable_name, &(entry->var.VariableName),
+ strsize2) &&
+ !efi_guidcmp(entry->var.VendorGuid,
+ *vendor)) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+ unsigned long variable_name_size)
+{
+ unsigned long len;
+ efi_char16_t c;
+
+ /*
+ * The variable name is, by definition, a NULL-terminated
+ * string, so make absolutely sure that variable_name_size is
+ * the value we expect it to be. If not, return the real size.
+ */
+ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+ c = variable_name[(len / sizeof(c)) - 1];
+ if (!c)
+ break;
+ }
+
+ return min(len, variable_name_size);
+}
+
+/*
+ * Print a warning when duplicate EFI variables are encountered;
+ * duplicate variables indicate buggy firmware.
+ */
+static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+ unsigned long len16)
+{
+ size_t i, len8 = len16 / sizeof(efi_char16_t);
+ char *str8;
+
+ str8 = kzalloc(len8, GFP_KERNEL);
+ if (!str8)
+ return;
+
+ for (i = 0; i < len8; i++)
+ str8[i] = str16[i];
+
+ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+ str8, vendor_guid);
+ kfree(str8);
+}
+
+/**
+ * efivar_init - build the initial list of EFI variables
+ * @func: callback function to invoke for every variable
+ * @data: function-specific data to pass to @func
+ * @duplicates: error if we encounter duplicates on @head?
+ * @head: initialised head of variable list
+ *
+ * Get every EFI variable from the firmware and invoke @func. @func
+ * should call efivar_entry_add() to build the list of variables.
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicates, struct list_head *head)
+{
+ const struct efivar_operations *ops;
+ unsigned long variable_name_size = 1024;
+ efi_char16_t *variable_name;
+ efi_status_t status;
+ efi_guid_t vendor_guid;
+ int err = 0;
+
+ if (!__efivars)
+ return -EFAULT;
+
+ ops = __efivars->ops;
+
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ printk(KERN_ERR "efivars: Memory allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ if (down_interruptible(&efivars_lock)) {
+ err = -EINTR;
+ goto free;
+ }
+
+ /*
+ * Per EFI spec, the maximum storage allocated for both
+ * the variable name and variable data is 1024 bytes.
+ */
+
+ do {
+ variable_name_size = 1024;
+
+ status = ops->get_next_variable(&variable_name_size,
+ variable_name,
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
+ if (duplicates)
+ up(&efivars_lock);
+
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
+
+ /*
+ * Some firmware implementations return the
+ * same variable name on multiple calls to
+ * get_next_variable(). Terminate the loop
+ * immediately as there is no guarantee that
+ * we'll ever see a different variable name,
+ * and may end up looping here forever.
+ */
+ if (duplicates &&
+ variable_is_present(variable_name, &vendor_guid,
+ head)) {
+ dup_variable_bug(variable_name, &vendor_guid,
+ variable_name_size);
+ status = EFI_NOT_FOUND;
+ } else {
+ err = func(variable_name, vendor_guid,
+ variable_name_size, data);
+ if (err)
+ status = EFI_NOT_FOUND;
+ }
+
+ if (duplicates) {
+ if (down_interruptible(&efivars_lock)) {
+ err = -EINTR;
+ goto free;
+ }
+ }
+
+ break;
+ case EFI_UNSUPPORTED:
+ err = -EOPNOTSUPP;
+ status = EFI_NOT_FOUND;
+ break;
+ case EFI_NOT_FOUND:
+ break;
+ default:
+ printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
+ status);
+ status = EFI_NOT_FOUND;
+ break;
+ }
+
+ } while (status != EFI_NOT_FOUND);
+
+ up(&efivars_lock);
+free:
+ kfree(variable_name);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(efivar_init);
+
+/**
+ * efivar_entry_add - add entry to variable list
+ * @entry: entry to add to list
+ * @head: list head
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_entry_add(struct efivar_entry *entry, struct list_head *head)
+{
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+ list_add(&entry->list, head);
+ up(&efivars_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_add);
+
+/**
+ * efivar_entry_remove - remove entry from variable list
+ * @entry: entry to remove from list
+ *
+ * Returns 0 on success, or a kernel error code on failure.
+ */
+int efivar_entry_remove(struct efivar_entry *entry)
+{
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+ list_del(&entry->list);
+ up(&efivars_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_remove);
+
+/*
+ * efivar_entry_list_del_unlock - remove entry from variable list
+ * @entry: entry to remove
+ *
+ * Remove @entry from the variable list and release the list lock.
+ *
+ * NOTE: slightly weird locking semantics here - we expect to be
+ * called with the efivars lock already held, and we release it before
+ * returning. This is because this function is usually called after
+ * set_variable() while the lock is still held.
+ */
+static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+{
+ list_del(&entry->list);
+ up(&efivars_lock);
+}
+
+/**
+ * __efivar_entry_delete - delete an EFI variable
+ * @entry: entry containing EFI variable to delete
+ *
+ * Delete the variable from the firmware but leave @entry on the
+ * variable list.
+ *
+ * This function differs from efivar_entry_delete() because it does
+ * not remove @entry from the variable list. Also, it is safe to be
+ * called from within a efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() region, unlike efivar_entry_delete().
+ *
+ * Returns 0 on success, or a converted EFI status code if
+ * set_variable() fails.
+ */
+int __efivar_entry_delete(struct efivar_entry *entry)
+{
+ efi_status_t status;
+
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_delete);
+
+/**
+ * efivar_entry_delete - delete variable and remove entry from list
+ * @entry: entry containing variable to delete
+ *
+ * Delete the variable from the firmware and remove @entry from the
+ * variable list. It is the caller's responsibility to free @entry
+ * once we return.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_delete(struct efivar_entry *entry)
+{
+ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
+ status = ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
+ if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) {
+ up(&efivars_lock);
+ return efi_status_to_err(status);
+ }
+
+ efivar_entry_list_del_unlock(entry);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_delete);
+
+/**
+ * efivar_entry_set - call set_variable()
+ * @entry: entry containing the EFI variable to write
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ * @head: head of variable list
+ *
+ * Calls set_variable() for an EFI variable. If creating a new EFI
+ * variable, this function is usually followed by efivar_entry_add().
+ *
+ * Before writing the variable, the remaining EFI variable storage
+ * space is checked to ensure there is enough room available.
+ *
+ * If @head is not NULL a lookup is performed to determine whether
+ * the entry is already on the list.
+ *
+ * Returns 0 on success, -EINTR if we can't grab the semaphore,
+ * -EEXIST if a lookup is performed and the entry already exists on
+ * the list, or a converted EFI status code if set_variable() fails.
+ */
+int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
+ unsigned long size, void *data, struct list_head *head)
+{
+ const struct efivar_operations *ops;
+ efi_status_t status;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t vendor = entry->var.VendorGuid;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
+ if (head && efivar_entry_find(name, vendor, head, false)) {
+ up(&efivars_lock);
+ return -EEXIST;
+ }
+
+ status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+ if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
+ status = ops->set_variable(name, &vendor,
+ attributes, size, data);
+
+ up(&efivars_lock);
+
+ return efi_status_to_err(status);
+
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set);
+
+/*
+ * efivar_entry_set_nonblocking - call set_variable_nonblocking()
+ *
+ * This function is guaranteed to not block and is suitable for calling
+ * from crash/panic handlers.
+ *
+ * Crucially, this function will not block if it cannot acquire
+ * efivars_lock. Instead, it returns -EBUSY.
+ */
+static int
+efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
+ u32 attributes, unsigned long size, void *data)
+{
+ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
+ status = check_var_size_nonblocking(attributes,
+ size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS) {
+ up(&efivars_lock);
+ return -ENOSPC;
+ }
+
+ ops = __efivars->ops;
+ status = ops->set_variable_nonblocking(name, &vendor, attributes,
+ size, data);
+
+ up(&efivars_lock);
+ return efi_status_to_err(status);
+}
+
+/**
+ * efivar_entry_set_safe - call set_variable() if enough space in firmware
+ * @name: buffer containing the variable name
+ * @vendor: variable vendor guid
+ * @attributes: variable attributes
+ * @block: can we block in this context?
+ * @size: size of @data buffer
+ * @data: buffer containing variable data
+ *
+ * Ensures there is enough free storage in the firmware for this variable, and
+ * if so, calls set_variable(). If creating a new EFI variable, this function
+ * is usually followed by efivar_entry_add().
+ *
+ * Returns 0 on success, -ENOSPC if the firmware does not have enough
+ * space for set_variable() to succeed, or a converted EFI status code
+ * if set_variable() fails.
+ */
+int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
+ bool block, unsigned long size, void *data)
+{
+ const struct efivar_operations *ops;
+ efi_status_t status;
+ unsigned long varsize;
+
+ if (!__efivars)
+ return -EINVAL;
+
+ ops = __efivars->ops;
+ if (!ops->query_variable_store)
+ return -ENOSYS;
+
+ /*
+ * If the EFI variable backend provides a non-blocking
+ * ->set_variable() operation and we're in a context where we
+ * cannot block, then we need to use it to avoid live-locks,
+ * since the implication is that the regular ->set_variable()
+ * will block.
+ *
+ * If no ->set_variable_nonblocking() is provided then
+ * ->set_variable() is assumed to be non-blocking.
+ */
+ if (!block && ops->set_variable_nonblocking)
+ return efivar_entry_set_nonblocking(name, vendor, attributes,
+ size, data);
+
+ varsize = size + ucs2_strsize(name, 1024);
+ if (!block) {
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+ status = check_var_size_nonblocking(attributes, varsize);
+ } else {
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+ status = check_var_size(attributes, varsize);
+ }
+
+ if (status != EFI_SUCCESS) {
+ up(&efivars_lock);
+ return -ENOSPC;
+ }
+
+ status = ops->set_variable(name, &vendor, attributes, size, data);
+
+ up(&efivars_lock);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_safe);
+
+/**
+ * efivar_entry_find - search for an entry
+ * @name: the EFI variable name
+ * @guid: the EFI variable vendor's guid
+ * @head: head of the variable list
+ * @remove: should we remove the entry from the list?
+ *
+ * Search for an entry on the variable list that has the EFI variable
+ * name @name and vendor guid @guid. If an entry is found on the list
+ * and @remove is true, the entry is removed from the list.
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ *
+ * Returns the entry if found on the list, %NULL otherwise.
+ */
+struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ struct list_head *head, bool remove)
+{
+ struct efivar_entry *entry, *n;
+ int strsize1, strsize2;
+ bool found = false;
+
+ list_for_each_entry_safe(entry, n, head, list) {
+ strsize1 = ucs2_strsize(name, 1024);
+ strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(name, &(entry->var.VariableName), strsize1) &&
+ !efi_guidcmp(guid, entry->var.VendorGuid)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ if (remove) {
+ if (entry->scanning) {
+ /*
+ * The entry will be deleted
+ * after scanning is completed.
+ */
+ entry->deleting = true;
+ } else
+ list_del(&entry->list);
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_find);
+
+/**
+ * efivar_entry_size - obtain the size of a variable
+ * @entry: entry for this variable
+ * @size: location to store the variable's size
+ */
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+{
+ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ *size = 0;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid, NULL, size, NULL);
+ up(&efivars_lock);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_size);
+
+/**
+ * __efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ *
+ * The caller MUST call efivar_entry_iter_begin() and
+ * efivar_entry_iter_end() before and after the invocation of this
+ * function, respectively.
+ */
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ efi_status_t status;
+
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_get);
+
+/**
+ * efivar_entry_get - call get_variable()
+ * @entry: read data for this variable
+ * @attributes: variable attributes
+ * @size: size of @data buffer
+ * @data: buffer to store variable data
+ */
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
+ up(&efivars_lock);
+
+ return efi_status_to_err(status);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_get);
+
+/**
+ * efivar_entry_set_get_size - call set_variable() and get new size (atomic)
+ * @entry: entry containing variable to set and get
+ * @attributes: attributes of variable to be written
+ * @size: size of data buffer
+ * @data: buffer containing data to write
+ * @set: did the set_variable() call succeed?
+ *
+ * This is a pretty special (complex) function. See efivarfs_file_write().
+ *
+ * Atomically call set_variable() for @entry and if the call is
+ * successful, return the new size of the variable from get_variable()
+ * in @size. The success of set_variable() is indicated by @set.
+ *
+ * Returns 0 on success, -EINVAL if the variable data is invalid,
+ * -ENOSPC if the firmware does not have enough available space, or a
+ * converted EFI status code if either of set_variable() or
+ * get_variable() fail.
+ *
+ * If the EFI variable does not exist when calling set_variable()
+ * (EFI_NOT_FOUND), @entry is removed from the variable list.
+ */
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set)
+{
+ const struct efivar_operations *ops;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t *vendor = &entry->var.VendorGuid;
+ efi_status_t status;
+ int err;
+
+ *set = false;
+
+ if (efivar_validate(*vendor, name, data, *size) == false)
+ return -EINVAL;
+
+ /*
+ * The lock here protects the get_variable call, the conditional
+ * set_variable call, and removal of the variable from the efivars
+ * list (in the case of an authenticated delete).
+ */
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Ensure that the available space hasn't shrunk below the safe level
+ */
+ status = check_var_size(attributes, *size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS) {
+ if (status != EFI_UNSUPPORTED) {
+ err = efi_status_to_err(status);
+ goto out;
+ }
+
+ if (*size > 65536) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ ops = __efivars->ops;
+
+ status = ops->set_variable(name, vendor, attributes, *size, data);
+ if (status != EFI_SUCCESS) {
+ err = efi_status_to_err(status);
+ goto out;
+ }
+
+ *set = true;
+
+ /*
+ * Writing to the variable may have caused a change in size (which
+ * could either be an append or an overwrite), or the variable to be
+ * deleted. Perform a GetVariable() so we can tell what actually
+ * happened.
+ */
+ *size = 0;
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ NULL, size, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ efivar_entry_list_del_unlock(entry);
+ else
+ up(&efivars_lock);
+
+ if (status && status != EFI_BUFFER_TOO_SMALL)
+ return efi_status_to_err(status);
+
+ return 0;
+
+out:
+ up(&efivars_lock);
+ return err;
+
+}
+EXPORT_SYMBOL_GPL(efivar_entry_set_get_size);
+
+/**
+ * efivar_entry_iter_begin - begin iterating the variable list
+ *
+ * Lock the variable list to prevent entry insertion and removal until
+ * efivar_entry_iter_end() is called. This function is usually used in
+ * conjunction with __efivar_entry_iter() or efivar_entry_iter().
+ */
+int efivar_entry_iter_begin(void)
+{
+ return down_interruptible(&efivars_lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_begin);
+
+/**
+ * efivar_entry_iter_end - finish iterating the variable list
+ *
+ * Unlock the variable list and allow modifications to the list again.
+ */
+void efivar_entry_iter_end(void)
+{
+ up(&efivars_lock);
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter_end);
+
+/**
+ * __efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of the variable list
+ * @data: function-specific data to pass to callback
+ * @prev: entry to begin iterating from
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete().
+ *
+ * You MUST call efivar_entry_iter_begin() before this function, and
+ * efivar_entry_iter_end() afterwards.
+ *
+ * It is possible to begin iteration from an arbitrary entry within
+ * the list by passing @prev. @prev is updated on return to point to
+ * the last entry passed to @func. To begin iterating from the
+ * beginning of the list @prev must be %NULL.
+ *
+ * The restrictions for @func are the same as documented for
+ * efivar_entry_iter().
+ */
+int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data,
+ struct efivar_entry **prev)
+{
+ struct efivar_entry *entry, *n;
+ int err = 0;
+
+ if (!prev || !*prev) {
+ list_for_each_entry_safe(entry, n, head, list) {
+ err = func(entry, data);
+ if (err)
+ break;
+ }
+
+ if (prev)
+ *prev = entry;
+
+ return err;
+ }
+
+
+ list_for_each_entry_safe_continue((*prev), n, head, list) {
+ err = func(*prev, data);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__efivar_entry_iter);
+
+/**
+ * efivar_entry_iter - iterate over variable list
+ * @func: callback function
+ * @head: head of variable list
+ * @data: function-specific data to pass to callback
+ *
+ * Iterate over the list of EFI variables and call @func with every
+ * entry on the list. It is safe for @func to remove entries in the
+ * list via efivar_entry_delete() while iterating.
+ *
+ * Some notes for the callback function:
+ * - a non-zero return value indicates an error and terminates the loop
+ * - @func is called from atomic context
+ */
+int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data)
+{
+ int err = 0;
+
+ err = efivar_entry_iter_begin();
+ if (err)
+ return err;
+ err = __efivar_entry_iter(func, head, data, NULL);
+ efivar_entry_iter_end();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(efivar_entry_iter);
+
+/**
+ * efivars_kobject - get the kobject for the registered efivars
+ *
+ * If efivars_register() has not been called we return NULL,
+ * otherwise return the kobject used at registration time.
+ */
+struct kobject *efivars_kobject(void)
+{
+ if (!__efivars)
+ return NULL;
+
+ return __efivars->kobject;
+}
+EXPORT_SYMBOL_GPL(efivars_kobject);
+
+/**
+ * efivars_register - register an efivars
+ * @efivars: efivars to register
+ * @ops: efivars operations
+ * @kobject: @efivars-specific kobject
+ *
+ * Only a single efivars can be registered at any time.
+ */
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *kobject)
+{
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ efivars->ops = ops;
+ efivars->kobject = kobject;
+
+ __efivars = efivars;
+
+ pr_info("Registered efivars operations\n");
+
+ up(&efivars_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efivars_register);
+
+/**
+ * efivars_unregister - unregister an efivars
+ * @efivars: efivars to unregister
+ *
+ * The caller must have already removed every entry from the list;
+ * failure to do so is an error.
+ */
+int efivars_unregister(struct efivars *efivars)
+{
+ int rv;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
+ if (!__efivars) {
+ printk(KERN_ERR "efivars not registered\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (__efivars != efivars) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ pr_info("Unregistered efivars operations\n");
+ __efivars = NULL;
+
+ rv = 0;
+out:
+ up(&efivars_lock);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efivars_unregister);
+
+int efivar_supports_writes(void)
+{
+ return __efivars && __efivars->ops->set_variable;
+}
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
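
The efivar_init()/efivar_entry_add() pairing documented above is easiest to see with a small consumer sketch. Everything below (the callback name, the list head, the init wiring) is an illustrative assumption rather than part of this file; the entry fields mirror struct efivar_entry as used by efivar_entry_find() above, and the shape roughly follows what efivarfs does.

#include <linux/efi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

static LIST_HEAD(example_var_list);

/* Called by efivar_init() once per variable reported by the firmware. */
static int example_add_entry(efi_char16_t *name, efi_guid_t vendor,
			     unsigned long name_size, void *data)
{
	struct list_head *head = data;
	struct efivar_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* name_size is at most 1024 bytes per the EFI spec note above. */
	memcpy(entry->var.VariableName, name, name_size);
	entry->var.VendorGuid = vendor;

	return efivar_entry_add(entry, head);
}

/* Typically from the consumer's init path:
 *	err = efivar_init(example_add_entry, &example_var_list,
 *			  true, &example_var_list);
 */

Passing true for @duplicates makes efivar_init() use the list head to detect names that buggy firmware reports twice, warning and terminating the scan instead of looping forever.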
diff --git a/drivers/firmware/efi/x86_fake_mem.c b/drivers/firmware/efi/x86_fake_mem.c
new file mode 100644
index 000000000..0bafcc1bb
--- /dev/null
+++ b/drivers/firmware/efi/x86_fake_mem.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
+#include <linux/efi.h>
+#include <asm/e820/api.h>
+#include "fake_mem.h"
+
+void __init efi_fake_memmap_early(void)
+{
+ int i;
+
+ /*
+ * The late efi_fake_memmap() call can handle all requests if
+ * EFI_MEMORY_SP support is disabled.
+ */
+ if (!efi_soft_reserve_enabled())
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ /*
+ * Given that efi_fake_memmap() needs to perform memblock
+ * allocations, it needs to run after e820__memblock_setup().
+ * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
+ * address range, that range may need to be marked as reserved
+ * prior to e820__memblock_setup(). Update e820 directly if
+ * EFI_MEMORY_SP is specified for an EFI_CONVENTIONAL_MEMORY
+ * descriptor.
+ */
+ for (i = 0; i < nr_fake_mem; i++) {
+ struct efi_mem_range *mem = &efi_fake_mems[i];
+ efi_memory_desc_t *md;
+ u64 m_start, m_end;
+
+ if ((mem->attribute & EFI_MEMORY_SP) == 0)
+ continue;
+
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ for_each_efi_memory_desc(md) {
+ u64 start, end, size;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ if (m_start <= end && m_end >= start)
+ /* fake range overlaps descriptor */;
+ else
+ continue;
+
+ /*
+ * Trim the boundary of the e820 update to the
+ * descriptor in case the fake range overlaps
+ * !EFI_CONVENTIONAL_MEMORY
+ */
+ start = max(start, m_start);
+ end = min(end, m_end);
+ size = end - start + 1;
+
+ if (end <= start)
+ continue;
+
+ /*
+ * Ensure each efi_fake_mem instance results in
+ * a unique e820 resource
+ */
+ e820__range_remove(start, size, E820_TYPE_RAM, 1);
+ e820__range_add(start, size, E820_TYPE_SOFT_RESERVED);
+ e820__update_table(e820_table);
+ }
+ }
+}
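
For reference, the EFI_MEMORY_SP handling above is exercised through the efi_fake_mem= boot parameter. As a hedged example (the nn@ss:attr syntax and the 0x40000 value of EFI_MEMORY_SP are taken from the kernel's command-line documentation and include/linux/efi.h, not from this file):

	efi_fake_mem=4G@9G:0x40000

would mark the 4 GiB of conventional memory starting at physical address 9 GiB as specific-purpose memory, which the loop above then moves from E820_TYPE_RAM to E820_TYPE_SOFT_RESERVED before e820__memblock_setup() runs.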
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
new file mode 100644
index 000000000..983e07dc0
--- /dev/null
+++ b/drivers/firmware/google/Kconfig
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig GOOGLE_FIRMWARE
+ bool "Google Firmware Drivers"
+ default n
+ help
+ These firmware drivers are used by Google servers,
+ Chromebooks and other devices using coreboot firmware.
+ If in doubt, say "N".
+
+if GOOGLE_FIRMWARE
+
+config GOOGLE_SMI
+ tristate "SMI interface for Google platforms"
+ depends on X86 && ACPI && DMI
+ help
+ Say Y here if you want to enable SMI callbacks for Google
+ platforms. This provides an interface for writing to and
+ clearing the event log. If CONFIG_EFI is also enabled this
+ driver provides an interface for reading and writing NVRAM
+ variables.
+
+config GOOGLE_COREBOOT_TABLE
+ tristate "Coreboot Table Access"
+ depends on HAS_IOMEM && (ACPI || OF)
+ help
+ This option enables the coreboot_table module, which provides other
+ firmware modules access to the coreboot table. The coreboot table
+ pointer is accessed through the ACPI "GOOGCB00" object or the
+ device tree node /firmware/coreboot.
+ If unsure say N.
+
+config GOOGLE_COREBOOT_TABLE_ACPI
+ tristate
+ select GOOGLE_COREBOOT_TABLE
+
+config GOOGLE_COREBOOT_TABLE_OF
+ tristate
+ select GOOGLE_COREBOOT_TABLE
+
+config GOOGLE_MEMCONSOLE
+ tristate
+ depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
+
+config GOOGLE_MEMCONSOLE_X86_LEGACY
+ tristate "Firmware Memory Console - X86 Legacy support"
+ depends on X86 && ACPI && DMI
+ select GOOGLE_MEMCONSOLE
+ help
+ This option enables the kernel to search for a firmware log in
+ the EBDA on Google servers. If found, this log is exported to
+ userland in the file /sys/firmware/log.
+
+config GOOGLE_FRAMEBUFFER_COREBOOT
+ tristate "Coreboot Framebuffer"
+ depends on FB_SIMPLE
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the kernel to search for a framebuffer in
+ the coreboot table. If found, it is registered with simplefb.
+
+config GOOGLE_MEMCONSOLE_COREBOOT
+ tristate "Firmware Memory Console"
+ depends on GOOGLE_COREBOOT_TABLE
+ select GOOGLE_MEMCONSOLE
+ help
+ This option enables the kernel to search for a firmware log in
+ the coreboot table. If found, this log is exported to userland
+ in the file /sys/firmware/log.
+
+config GOOGLE_VPD
+ tristate "Vital Product Data"
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the kernel to expose the content of Google VPD
+ under /sys/firmware/vpd.
+
+endif # GOOGLE_FIRMWARE
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
new file mode 100644
index 000000000..d17caded5
--- /dev/null
+++ b/drivers/firmware/google/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
+obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o
+
+vpd-sysfs-y := vpd.o vpd_decode.o
+obj-$(CONFIG_GOOGLE_VPD) += vpd-sysfs.o
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
new file mode 100644
index 000000000..568074148
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * coreboot_table.c
+ *
+ * Module providing coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "coreboot_table.h"
+
+#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
+#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
+
+static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(drv);
+
+ return device->entry.tag == driver->tag;
+}
+
+static int coreboot_bus_probe(struct device *dev)
+{
+ int ret = -ENODEV;
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+ if (driver->probe)
+ ret = driver->probe(device);
+
+ return ret;
+}
+
+static int coreboot_bus_remove(struct device *dev)
+{
+ int ret = 0;
+ struct coreboot_device *device = CB_DEV(dev);
+ struct coreboot_driver *driver = CB_DRV(dev->driver);
+
+ if (driver->remove)
+ ret = driver->remove(device);
+
+ return ret;
+}
+
+static struct bus_type coreboot_bus_type = {
+ .name = "coreboot",
+ .match = coreboot_bus_match,
+ .probe = coreboot_bus_probe,
+ .remove = coreboot_bus_remove,
+};
+
+static void coreboot_device_release(struct device *dev)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+
+ kfree(device);
+}
+
+int coreboot_driver_register(struct coreboot_driver *driver)
+{
+ driver->drv.bus = &coreboot_bus_type;
+
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_register);
+
+void coreboot_driver_unregister(struct coreboot_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(coreboot_driver_unregister);
+
+static int coreboot_table_populate(struct device *dev, void *ptr)
+{
+ int i, ret;
+ void *ptr_entry;
+ struct coreboot_device *device;
+ struct coreboot_table_entry *entry;
+ struct coreboot_table_header *header = ptr;
+
+ ptr_entry = ptr + header->header_bytes;
+ for (i = 0; i < header->table_entries; i++) {
+ entry = ptr_entry;
+
+ device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ dev_set_name(&device->dev, "coreboot%d", i);
+ device->dev.parent = dev;
+ device->dev.bus = &coreboot_bus_type;
+ device->dev.release = coreboot_device_release;
+ memcpy(&device->entry, ptr_entry, entry->size);
+
+ ret = device_register(&device->dev);
+ if (ret) {
+ put_device(&device->dev);
+ return ret;
+ }
+
+ ptr_entry += entry->size;
+ }
+
+ return 0;
+}
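coreboot_table_populate() treats the table as a fixed header followed by table_entries variable-sized records, each beginning with a tag/size pair, and registers one coreboot_device per record. A small user-space sketch of the same walk over a hand-built buffer (struct layouts copied from coreboot_table.h; the single entry uses the CBMEM console tag that appears later in this patch) may make the layout clearer.

/* User-space sketch of the coreboot table walk done by
 * coreboot_table_populate(); in the kernel the buffer comes from
 * memremap()ing the table, here it is built by hand.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cb_header {
	char signature[4];	/* "LBIO" */
	uint32_t header_bytes;
	uint32_t header_checksum;
	uint32_t table_bytes;
	uint32_t table_checksum;
	uint32_t table_entries;
};

struct cb_entry {
	uint32_t tag;
	uint32_t size;		/* size of the whole entry, tag included */
};

static void walk_table(const void *ptr)
{
	const struct cb_header *h = ptr;
	const uint8_t *p = (const uint8_t *)ptr + h->header_bytes;
	uint32_t i;

	if (memcmp(h->signature, "LBIO", 4) != 0) {
		puts("coreboot table missing or corrupt");
		return;
	}

	for (i = 0; i < h->table_entries; i++) {
		const struct cb_entry *e = (const struct cb_entry *)p;

		printf("entry %u: tag %#x, size %u\n", i, e->tag, e->size);
		p += e->size;	/* entries are variable-sized */
	}
}

int main(void)
{
	uint8_t buf[sizeof(struct cb_header) + sizeof(struct cb_entry)] = {0};
	struct cb_header *h = (struct cb_header *)buf;
	struct cb_entry *e = (struct cb_entry *)(buf + sizeof(*h));

	memcpy(h->signature, "LBIO", 4);
	h->header_bytes = sizeof(*h);
	h->table_bytes = sizeof(*e);
	h->table_entries = 1;
	e->tag = 0x17;			/* CB_TAG_CBMEM_CONSOLE */
	e->size = sizeof(*e);

	walk_table(buf);
	return 0;
}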
+
+static int coreboot_table_probe(struct platform_device *pdev)
+{
+ resource_size_t len;
+ struct coreboot_table_header *header;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void *ptr;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -EINVAL;
+
+ /* Check just the header first to make sure things are sane */
+ header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+ if (!header)
+ return -ENOMEM;
+
+ len = header->header_bytes + header->table_bytes;
+ ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+ memunmap(header);
+ if (ret) {
+ dev_warn(dev, "coreboot table missing or corrupt!\n");
+ return -ENODEV;
+ }
+
+ ptr = memremap(res->start, len, MEMREMAP_WB);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = coreboot_table_populate(dev, ptr);
+
+ memunmap(ptr);
+
+ return ret;
+}
+
+static int __cb_dev_unregister(struct device *dev, void *dummy)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+ bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+ { "GOOGCB00", 0 },
+ { "BOOT0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+ .probe = coreboot_table_probe,
+ .remove = coreboot_table_remove,
+ .driver = {
+ .name = "coreboot_table",
+ .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+ .of_match_table = of_match_ptr(coreboot_of_match),
+ },
+};
+
+static int __init coreboot_table_driver_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&coreboot_table_driver);
+ if (ret) {
+ bus_unregister(&coreboot_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+ platform_driver_unregister(&coreboot_table_driver);
+ bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
new file mode 100644
index 000000000..7b7b4a6ee
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * coreboot_table.h
+ *
+ * Internal header for coreboot table access.
+ *
+ * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef __COREBOOT_TABLE_H
+#define __COREBOOT_TABLE_H
+
+#include <linux/device.h>
+
+/* Coreboot table header structure */
+struct coreboot_table_header {
+ char signature[4];
+ u32 header_bytes;
+ u32 header_checksum;
+ u32 table_bytes;
+ u32 table_checksum;
+ u32 table_entries;
+};
+
+/* List of coreboot entry structures that are used */
+/* Generic */
+struct coreboot_table_entry {
+ u32 tag;
+ u32 size;
+};
+
+/* Points to a CBMEM entry */
+struct lb_cbmem_ref {
+ u32 tag;
+ u32 size;
+
+ u64 cbmem_addr;
+};
+
+/* Describes framebuffer setup by coreboot */
+struct lb_framebuffer {
+ u32 tag;
+ u32 size;
+
+ u64 physical_address;
+ u32 x_resolution;
+ u32 y_resolution;
+ u32 bytes_per_line;
+ u8 bits_per_pixel;
+ u8 red_mask_pos;
+ u8 red_mask_size;
+ u8 green_mask_pos;
+ u8 green_mask_size;
+ u8 blue_mask_pos;
+ u8 blue_mask_size;
+ u8 reserved_mask_pos;
+ u8 reserved_mask_size;
+};
+
+/* A device, additionally with information from coreboot. */
+struct coreboot_device {
+ struct device dev;
+ union {
+ struct coreboot_table_entry entry;
+ struct lb_cbmem_ref cbmem_ref;
+ struct lb_framebuffer framebuffer;
+ };
+};
+
+/* A driver for handling devices described in coreboot tables. */
+struct coreboot_driver {
+ int (*probe)(struct coreboot_device *);
+ int (*remove)(struct coreboot_device *);
+ struct device_driver drv;
+ u32 tag;
+};
+
+/* Register a driver that uses the data from a coreboot table. */
+int coreboot_driver_register(struct coreboot_driver *driver);
+
+/* Unregister a driver that uses the data from a coreboot table. */
+void coreboot_driver_unregister(struct coreboot_driver *driver);
+
+/* module_coreboot_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_coreboot_driver(__coreboot_driver) \
+ module_driver(__coreboot_driver, coreboot_driver_register, \
+ coreboot_driver_unregister)
+
+#endif /* __COREBOOT_TABLE_H */
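With these definitions a bus client only needs a tag, probe/remove callbacks and a driver name; module_coreboot_driver() then generates the module init/exit boilerplate. A hypothetical skeleton is sketched below (the tag value and names are invented for illustration); framebuffer-coreboot.c further down in this patch is a real instance of the same pattern.

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical skeleton of a coreboot bus client; the tag and names
 * are made up, only the structure mirrors the API in coreboot_table.h.
 */
#include <linux/module.h>
#include "coreboot_table.h"

#define CB_TAG_EXAMPLE 0x99	/* invented tag, for illustration only */

static int example_probe(struct coreboot_device *dev)
{
	/* dev->entry (or a union member) holds the matched table entry */
	dev_info(&dev->dev, "entry tag %#x, size %u\n",
		 dev->entry.tag, dev->entry.size);
	return 0;
}

static int example_remove(struct coreboot_device *dev)
{
	return 0;
}

static struct coreboot_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,
	.drv = {
		.name = "coreboot-example",
	},
	.tag = CB_TAG_EXAMPLE,
};
module_coreboot_driver(example_driver);

MODULE_LICENSE("GPL");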
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
new file mode 100644
index 000000000..922c079d1
--- /dev/null
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * framebuffer-coreboot.c
+ *
+ * Memory based framebuffer accessed through coreboot table.
+ *
+ * Copyright 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ * Copyright 2017 Google Inc.
+ * Copyright 2017 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+#define CB_TAG_FRAMEBUFFER 0x12
+
+static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+
+static int framebuffer_probe(struct coreboot_device *dev)
+{
+ int i;
+ u32 length;
+ struct lb_framebuffer *fb = &dev->framebuffer;
+ struct platform_device *pdev;
+ struct resource res;
+ struct simplefb_platform_data pdata = {
+ .width = fb->x_resolution,
+ .height = fb->y_resolution,
+ .stride = fb->bytes_per_line,
+ .format = NULL,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (fb->bits_per_pixel == formats[i].bits_per_pixel &&
+ fb->red_mask_pos == formats[i].red.offset &&
+ fb->red_mask_size == formats[i].red.length &&
+ fb->green_mask_pos == formats[i].green.offset &&
+ fb->green_mask_size == formats[i].green.length &&
+ fb->blue_mask_pos == formats[i].blue.offset &&
+ fb->blue_mask_size == formats[i].blue.length)
+ pdata.format = formats[i].name;
+ }
+ if (!pdata.format)
+ return -ENODEV;
+
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res.name = "Coreboot Framebuffer";
+ res.start = fb->physical_address;
+ length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
+ res.end = res.start + length - 1;
+ if (res.end <= res.start)
+ return -EINVAL;
+
+ pdev = platform_device_register_resndata(&dev->dev,
+ "simple-framebuffer", 0,
+ &res, 1, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev))
+ pr_warn("coreboot: could not register framebuffer\n");
+ else
+ dev_set_drvdata(&dev->dev, pdev);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static int framebuffer_remove(struct coreboot_device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(&dev->dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static struct coreboot_driver framebuffer_driver = {
+ .probe = framebuffer_probe,
+ .remove = framebuffer_remove,
+ .drv = {
+ .name = "framebuffer",
+ },
+ .tag = CB_TAG_FRAMEBUFFER,
+};
+module_coreboot_driver(framebuffer_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
new file mode 100644
index 000000000..407cac71c
--- /dev/null
+++ b/drivers/firmware/google/gsmi.c
@@ -0,0 +1,1081 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ * Author: dlaurie@google.com (Duncan Laurie)
+ *
+ * Re-worked to expose sysfs APIs by mikew@google.com (Mike Waychison)
+ *
+ * EFI SMI interface for Google platforms
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/dmi.h>
+#include <linux/kdebug.h>
+#include <linux/reboot.h>
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/ucs2_string.h>
+#include <linux/suspend.h>
+
+#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
+/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
+#define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */
+#define GSMI_SHUTDOWN_PANIC 2 /* Panic */
+#define GSMI_SHUTDOWN_OOPS 3 /* Oops */
+#define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */
+#define GSMI_SHUTDOWN_MCE 5 /* Machine Check */
+#define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */
+#define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */
+#define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */
+
+#define DRIVER_VERSION "1.0"
+#define GSMI_GUID_SIZE 16
+#define GSMI_BUF_SIZE 1024
+#define GSMI_BUF_ALIGN sizeof(u64)
+#define GSMI_CALLBACK 0xef
+
+/* SMI return codes */
+#define GSMI_SUCCESS 0x00
+#define GSMI_UNSUPPORTED2 0x03
+#define GSMI_LOG_FULL 0x0b
+#define GSMI_VAR_NOT_FOUND 0x0e
+#define GSMI_HANDSHAKE_SPIN 0x7d
+#define GSMI_HANDSHAKE_CF 0x7e
+#define GSMI_HANDSHAKE_NONE 0x7f
+#define GSMI_INVALID_PARAMETER 0x82
+#define GSMI_UNSUPPORTED 0x83
+#define GSMI_BUFFER_TOO_SMALL 0x85
+#define GSMI_NOT_READY 0x86
+#define GSMI_DEVICE_ERROR 0x87
+#define GSMI_NOT_FOUND 0x8e
+
+#define QUIRKY_BOARD_HASH 0x78a30a50
+
+/* Internally used commands passed to the firmware */
+#define GSMI_CMD_GET_NVRAM_VAR 0x01
+#define GSMI_CMD_GET_NEXT_VAR 0x02
+#define GSMI_CMD_SET_NVRAM_VAR 0x03
+#define GSMI_CMD_SET_EVENT_LOG 0x08
+#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND 0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
+#define GSMI_CMD_CLEAR_CONFIG 0x20
+#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
+#define GSMI_CMD_RESERVED 0xff
+
+/* Magic entry type for kernel events */
+#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
+
+/* SMI buffers must be in 32bit physical address space */
+struct gsmi_buf {
+ u8 *start; /* start of buffer */
+ size_t length; /* length of buffer */
+ dma_addr_t handle; /* dma allocation handle */
+ u32 address; /* physical address of buffer */
+};
+
+static struct gsmi_device {
+ struct platform_device *pdev; /* platform device */
+ struct gsmi_buf *name_buf; /* variable name buffer */
+ struct gsmi_buf *data_buf; /* generic data buffer */
+ struct gsmi_buf *param_buf; /* parameter buffer */
+ spinlock_t lock; /* serialize access to SMIs */
+ u16 smi_cmd; /* SMI command port */
+ int handshake_type; /* firmware handler interlock type */
+ struct dma_pool *dma_pool; /* DMA buffer pool */
+} gsmi_dev;
+
+/* Packed structures for communicating with the firmware */
+struct gsmi_nvram_var_param {
+ efi_guid_t guid;
+ u32 name_ptr;
+ u32 attributes;
+ u32 data_len;
+ u32 data_ptr;
+} __packed;
+
+struct gsmi_get_next_var_param {
+ u8 guid[GSMI_GUID_SIZE];
+ u32 name_ptr;
+ u32 name_len;
+} __packed;
+
+struct gsmi_set_eventlog_param {
+ u32 data_ptr;
+ u32 data_len;
+ u32 type;
+} __packed;
+
+/* Event log formats */
+struct gsmi_log_entry_type_1 {
+ u16 type;
+ u32 instance;
+} __packed;
+
+/*
+ * Some platforms don't have an explicit SMI handshake
+ * and need to wait for the SMI to complete.
+ */
+#define GSMI_DEFAULT_SPINCOUNT 0x10000
+static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT;
+module_param(spincount, uint, 0600);
+MODULE_PARM_DESC(spincount,
+ "The number of loop iterations to use when using the spin handshake.");
+
+/*
+ * Platforms might not support S0ix logging in their GSMI handlers. In order to
+ * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
+ * related GSMI commands only for those platforms that explicitly enable this
+ * option.
+ */
+static bool s0ix_logging_enable;
+module_param(s0ix_logging_enable, bool, 0600);
+
+static struct gsmi_buf *gsmi_buf_alloc(void)
+{
+ struct gsmi_buf *smibuf;
+
+ smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL);
+ if (!smibuf) {
+ printk(KERN_ERR "gsmi: out of memory\n");
+ return NULL;
+ }
+
+ /* allocate buffer in 32bit address space */
+ smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
+ &smibuf->handle);
+ if (!smibuf->start) {
+ printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+ kfree(smibuf);
+ return NULL;
+ }
+
+ /* fill in the buffer handle */
+ smibuf->length = GSMI_BUF_SIZE;
+ smibuf->address = (u32)virt_to_phys(smibuf->start);
+
+ return smibuf;
+}
+
+static void gsmi_buf_free(struct gsmi_buf *smibuf)
+{
+ if (smibuf) {
+ if (smibuf->start)
+ dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
+ smibuf->handle);
+ kfree(smibuf);
+ }
+}
+
+/*
+ * Make a call to gsmi func(sub). GSMI error codes are translated to
+ * in-kernel errnos (0 on success, -ERRNO on error).
+ */
+static int gsmi_exec(u8 func, u8 sub)
+{
+ u16 cmd = (sub << 8) | func;
+ u16 result = 0;
+ int rc = 0;
+
+ /*
+ * AH : Subfunction number
+ * AL : Function number
+ * EBX : Parameter block address
+ * DX : SMI command port
+ *
+ * Three protocols here. See also the comment in gsmi_init().
+ */
+ if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) {
+ /*
+ * If handshake_type == HANDSHAKE_CF then set CF on the
+ * way in and wait for the handler to clear it; this avoids
+ * corrupting register state on those chipsets which have
+ * a delay between writing the SMI trigger register and
+ * entering SMM.
+ */
+ asm volatile (
+ "stc\n"
+ "outb %%al, %%dx\n"
+ "1: jc 1b\n"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address)
+ : "memory", "cc"
+ );
+ } else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) {
+ /*
+ * If handshake_type == HANDSHAKE_SPIN we spin a
+ * hundred-ish usecs to ensure the SMI has triggered.
+ */
+ asm volatile (
+ "outb %%al, %%dx\n"
+ "1: loop 1b\n"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address),
+ "c" (spincount)
+ : "memory", "cc"
+ );
+ } else {
+ /*
+ * If handshake_type == HANDSHAKE_NONE we do nothing;
+ * either we don't need to or it's legacy firmware that
+ * doesn't understand the CF protocol.
+ */
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (gsmi_dev.smi_cmd),
+ "b" (gsmi_dev.param_buf->address)
+ : "memory", "cc"
+ );
+ }
+
+ /* check return code from SMI handler */
+ switch (result) {
+ case GSMI_SUCCESS:
+ break;
+ case GSMI_VAR_NOT_FOUND:
+ /* not really an error, but let the caller know */
+ rc = 1;
+ break;
+ case GSMI_INVALID_PARAMETER:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd);
+ rc = -EINVAL;
+ break;
+ case GSMI_BUFFER_TOO_SMALL:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd);
+ rc = -ENOMEM;
+ break;
+ case GSMI_UNSUPPORTED:
+ case GSMI_UNSUPPORTED2:
+ if (sub != GSMI_CMD_HANDSHAKE_TYPE)
+ printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n",
+ cmd);
+ rc = -ENOSYS;
+ break;
+ case GSMI_NOT_READY:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd);
+ rc = -EBUSY;
+ break;
+ case GSMI_DEVICE_ERROR:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd);
+ rc = -EFAULT;
+ break;
+ case GSMI_NOT_FOUND:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd);
+ rc = -ENOENT;
+ break;
+ case GSMI_LOG_FULL:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd);
+ rc = -ENOSPC;
+ break;
+ case GSMI_HANDSHAKE_CF:
+ case GSMI_HANDSHAKE_SPIN:
+ case GSMI_HANDSHAKE_NONE:
+ rc = result;
+ break;
+ default:
+ printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n",
+ cmd, result);
+ rc = -ENXIO;
+ }
+
+ return rc;
+}
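For reference, the command word gsmi_exec() loads into AX packs the subfunction into AH and the function into AL, with EBX carrying the 32-bit physical address of the parameter buffer and DX the SMI command port; the handler returns its status in AX. A tiny user-space sketch of just the command-word packing, using the defines above:

/* Sketch of the GSMI command-word encoding used by gsmi_exec(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t func = 0xef;		/* GSMI_CALLBACK */
	uint8_t sub  = 0x01;		/* GSMI_CMD_GET_NVRAM_VAR */
	uint16_t cmd = (uint16_t)(sub << 8) | func;	/* AH=sub, AL=func */

	printf("command word in AX: 0x%04x (AH=0x%02x, AL=0x%02x)\n",
	       cmd, cmd >> 8, cmd & 0xff);
	return 0;
}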
+
+#ifdef CONFIG_EFI
+
+static struct efivars efivars;
+
+static efi_status_t gsmi_get_variable(efi_char16_t *name,
+ efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size,
+ void *data)
+{
+ struct gsmi_nvram_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .data_ptr = gsmi_dev.data_buf->address,
+ .data_len = (u32)*data_size,
+ };
+ efi_status_t ret = EFI_SUCCESS;
+ unsigned long flags;
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+ int rc;
+
+ if (name_len >= GSMI_BUF_SIZE / 2)
+ return EFI_BAD_BUFFER_SIZE;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* Vendor guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+ memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Get Variable failed\n");
+ ret = EFI_LOAD_ERROR;
+ } else if (rc == 1) {
+ /* variable was not found */
+ ret = EFI_NOT_FOUND;
+ } else {
+ /* Get the arguments back */
+ memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+ /* The size reported is the min of all of our buffers */
+ *data_size = min_t(unsigned long, *data_size,
+ gsmi_dev.data_buf->length);
+ *data_size = min_t(unsigned long, *data_size, param.data_len);
+
+ /* Copy data back to return buffer. */
+ memcpy(data, gsmi_dev.data_buf->start, *data_size);
+
+		/* All variables have the following attributes */
+ if (attr)
+ *attr = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ struct gsmi_get_next_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .name_len = gsmi_dev.name_buf->length,
+ };
+ efi_status_t ret = EFI_SUCCESS;
+ int rc;
+ unsigned long flags;
+
+ /* For the moment, only support buffers that exactly match in size */
+ if (*name_size != GSMI_BUF_SIZE)
+ return EFI_BAD_BUFFER_SIZE;
+
+ /* Let's make sure the thing is at least null-terminated */
+ if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
+ return EFI_INVALID_PARAMETER;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memcpy(gsmi_dev.name_buf->start, name, *name_size);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Get Next Variable Name failed\n");
+ ret = EFI_LOAD_ERROR;
+ } else if (rc == 1) {
+ /* variable not found -- end of list */
+ ret = EFI_NOT_FOUND;
+ } else {
+ /* copy variable data back to return buffer */
+ memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
+
+ /* Copy the name back */
+ memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
+ *name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2;
+
+ /* copy guid to return buffer */
+ memcpy(vendor, &param.guid, sizeof(param.guid));
+ ret = EFI_SUCCESS;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static efi_status_t gsmi_set_variable(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 attr,
+ unsigned long data_size,
+ void *data)
+{
+ struct gsmi_nvram_var_param param = {
+ .name_ptr = gsmi_dev.name_buf->address,
+ .data_ptr = gsmi_dev.data_buf->address,
+ .data_len = (u32)data_size,
+ .attributes = EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ };
+ size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
+ efi_status_t ret = EFI_SUCCESS;
+ int rc;
+ unsigned long flags;
+
+ if (name_len >= GSMI_BUF_SIZE / 2)
+ return EFI_BAD_BUFFER_SIZE;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* guid */
+ memcpy(&param.guid, vendor, sizeof(param.guid));
+
+ /* variable name, already in UTF-16 */
+ memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
+ memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, data, data_size);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR);
+ if (rc < 0) {
+ printk(KERN_ERR "gsmi: Set Variable failed\n");
+ ret = EFI_INVALID_PARAMETER;
+ }
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return ret;
+}
+
+static const struct efivar_operations efivar_ops = {
+ .get_variable = gsmi_get_variable,
+ .set_variable = gsmi_set_variable,
+ .get_next_variable = gsmi_get_next_variable,
+};
+
+#endif /* CONFIG_EFI */
+
+static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct gsmi_set_eventlog_param param = {
+ .data_ptr = gsmi_dev.data_buf->address,
+ };
+ int rc = 0;
+ unsigned long flags;
+
+ /* Pull the type out */
+ if (count < sizeof(u32))
+ return -EINVAL;
+ param.type = *(u32 *)buf;
+ buf += sizeof(u32);
+
+ /* The remaining buffer is the data payload */
+ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
+ return -EINVAL;
+ param.data_len = count - sizeof(u32);
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, buf, param.data_len);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+ if (rc < 0)
+ printk(KERN_ERR "gsmi: Set Event Log failed\n");
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ return (rc == 0) ? count : rc;
+
+}
+
+static struct bin_attribute eventlog_bin_attr = {
+ .attr = {.name = "append_to_eventlog", .mode = 0200},
+ .write = eventlog_write,
+};
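From userspace, an event is appended by writing a blob to /sys/firmware/gsmi/append_to_eventlog: the first 32-bit word selects the firmware log type and the remainder is the payload. The sketch below mirrors the kernel-type entry that gsmi_shutdown_reason() builds; whether a particular firmware accepts such an entry is platform-specific. Clearing the log works by writing a percentage value (0 through 100) to the sibling clear_eventlog attribute.

/* User-space sketch: append a kernel-type entry through the gsmi sysfs
 * interface, mirroring the blob that gsmi_shutdown_reason() builds.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct {
		uint16_t type;		/* GSMI_LOG_ENTRY_TYPE_KERNEL */
		uint32_t instance;	/* GSMI_SHUTDOWN_CLEAN */
	} __attribute__((packed)) entry = { 0xDEAD, 0 };
	uint32_t log_type = 1;		/* same "type" the driver passes */
	uint8_t buf[sizeof(log_type) + sizeof(entry)];
	int fd;

	memcpy(buf, &log_type, sizeof(log_type));
	memcpy(buf + sizeof(log_type), &entry, sizeof(entry));

	fd = open("/sys/firmware/gsmi/append_to_eventlog", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("write");
	close(fd);
	return 0;
}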
+
+static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned long flags;
+ unsigned long val;
+ struct {
+ u32 percentage;
+ u32 data_type;
+ } param;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ /*
+ * Value entered is a percentage, 0 through 100, anything else
+ * is invalid.
+ */
+ if (val > 100)
+ return -EINVAL;
+
+ /* data_type here selects the smbios event log. */
+ param.percentage = val;
+ param.data_type = 0;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc)
+ return rc;
+ return count;
+}
+
+static struct kobj_attribute gsmi_clear_eventlog_attr = {
+ .attr = {.name = "clear_eventlog", .mode = 0200},
+ .store = gsmi_clear_eventlog_store,
+};
+
+static ssize_t gsmi_clear_config_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ /* clear parameter buffer */
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc)
+ return rc;
+ return count;
+}
+
+static struct kobj_attribute gsmi_clear_config_attr = {
+ .attr = {.name = "clear_config", .mode = 0200},
+ .store = gsmi_clear_config_store,
+};
+
+static const struct attribute *gsmi_attrs[] = {
+ &gsmi_clear_config_attr.attr,
+ &gsmi_clear_eventlog_attr.attr,
+ NULL,
+};
+
+static int gsmi_shutdown_reason(int reason)
+{
+ struct gsmi_log_entry_type_1 entry = {
+ .type = GSMI_LOG_ENTRY_TYPE_KERNEL,
+ .instance = reason,
+ };
+ struct gsmi_set_eventlog_param param = {
+ .data_len = sizeof(entry),
+ .type = 1,
+ };
+ static int saved_reason;
+ int rc = 0;
+ unsigned long flags;
+
+ /* avoid duplicate entries in the log */
+ if (saved_reason & (1 << reason))
+ return 0;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ saved_reason |= (1 << reason);
+
+ /* data pointer */
+ memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
+ memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry));
+
+ /* parameter buffer */
+ param.data_ptr = gsmi_dev.data_buf->address;
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+ memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
+
+ rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ if (rc < 0)
+ printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n");
+ else
+ printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n",
+ reason);
+
+ return rc;
+}
+
+static int gsmi_reboot_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_reboot_notifier = {
+ .notifier_call = gsmi_reboot_callback
+};
+
+static int gsmi_die_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ if (reason == DIE_OOPS)
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_die_notifier = {
+ .notifier_call = gsmi_die_callback
+};
+
+static int gsmi_panic_callback(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
+ gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gsmi_panic_notifier = {
+ .notifier_call = gsmi_panic_callback,
+};
+
+/*
+ * This hash function was blatantly copied from include/linux/hash.h.
+ * It is used by this driver to obfuscate a board name that requires a
+ * quirk within this driver.
+ *
+ * Please do not remove this copy of the function as any changes to the
+ * global utility hash_64() function would break this driver's ability
+ * to identify a board and provide the appropriate quirk -- mikew@google.com
+ */
+static u64 __init local_hash_64(u64 val, unsigned bits)
+{
+ u64 hash = val;
+
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ u64 n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+
+ /* High bits are more random, so use them. */
+ return hash >> (64 - bits);
+}
+
+static u32 __init hash_oem_table_id(char s[8])
+{
+ u64 input;
+ memcpy(&input, s, 8);
+ return local_hash_64(input, 32);
+}
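The quirk check only needs a 32-bit fingerprint of the FADT oem_table_id, so those eight bytes are hashed with the frozen copy of hash_64() above and compared against QUIRKY_BOARD_HASH. A stand-alone user-space sketch of the same computation, fed an invented table id, is below.

/* User-space copy of the fingerprint computed by hash_oem_table_id();
 * the sample table id string is invented.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t local_hash_64(uint64_t val, unsigned bits)
{
	uint64_t hash = val, n = val;

	n <<= 18; hash -= n;
	n <<= 33; hash -= n;
	n <<= 3;  hash += n;
	n <<= 3;  hash -= n;
	n <<= 4;  hash += n;
	n <<= 2;  hash += n;

	return hash >> (64 - bits);
}

int main(void)
{
	const char oem_table_id[] = "EXAMPLE0";	/* hypothetical FADT field */
	uint64_t input;

	memcpy(&input, oem_table_id, sizeof(input));
	printf("hash = 0x%08llx\n",
	       (unsigned long long)local_hash_64(input, 32));
	return 0;
}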
+
+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
+ {
+ .ident = "Google Board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+ },
+ },
+ {
+ .ident = "Coreboot Firmware",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
+
+static __init int gsmi_system_valid(void)
+{
+ u32 hash;
+ u16 cmd, result;
+
+ if (!dmi_check_system(gsmi_dmi_table))
+ return -ENODEV;
+
+ /*
+ * Only newer firmware supports the gsmi interface. All older
+ * firmware that didn't support this interface used to plug the
+ * table name in the first four bytes of the oem_table_id field.
+ * Newer firmware doesn't do that though, so use that as the
+ * discriminant factor. We have to do this in order to
+ * whitewash our board names out of the public driver.
+ */
+ if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) {
+ printk(KERN_INFO "gsmi: Board is too old\n");
+ return -ENODEV;
+ }
+
+ /* Disable on board with 1.0 BIOS due to Google bug 2602657 */
+ hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id);
+ if (hash == QUIRKY_BOARD_HASH) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+ if (strncmp(bios_ver, "1.0", 3) == 0) {
+ pr_info("gsmi: disabled on this board's BIOS %s\n",
+ bios_ver);
+ return -ENODEV;
+ }
+ }
+
+ /* check for valid SMI command port in ACPI FADT */
+ if (acpi_gbl_FADT.smi_command == 0) {
+ pr_info("gsmi: missing smi_command\n");
+ return -ENODEV;
+ }
+
+ /* Test the smihandler with a bogus command. If it leaves the
+ * calling argument in %ax untouched, there is no handler for
+ * GSMI commands.
+ */
+ cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
+ asm volatile (
+ "outb %%al, %%dx\n\t"
+ : "=a" (result)
+ : "0" (cmd),
+ "d" (acpi_gbl_FADT.smi_command)
+ : "memory", "cc"
+ );
+ if (cmd == result) {
+ pr_info("gsmi: no gsmi handler in firmware\n");
+ return -ENODEV;
+ }
+
+ /* Found */
+ return 0;
+}
+
+static struct kobject *gsmi_kobj;
+
+static const struct platform_device_info gsmi_dev_info = {
+ .name = "gsmi",
+ .id = -1,
+ /* SMI callbacks require 32bit addresses */
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+ unsigned long flags;
+
+ /*
+ * If platform has not enabled S0ix logging, then no action is
+ * necessary.
+ */
+ if (!s0ix_logging_enable)
+ return;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ gsmi_exec(GSMI_CALLBACK, cmd);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+ /*
+ * If system is not suspending via firmware using the standard ACPI Sx
+ * types, then make a GSMI call to log the suspend info.
+ */
+ if (!pm_suspend_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+ /*
+ * Always return success, since we do not want suspend
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+ /*
+ * If system did not resume via firmware, then make a GSMI call to log
+ * the resume info and wake source.
+ */
+ if (!pm_resume_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+ /*
+ * Always return success, since we do not want resume
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+ .suspend_noirq = gsmi_log_s0ix_suspend,
+ .resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+ .driver = {
+ .name = "gsmi",
+ .pm = &gsmi_pm_ops,
+ },
+ .probe = gsmi_platform_driver_probe,
+};
+#endif
+
+static __init int gsmi_init(void)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = gsmi_system_valid();
+ if (ret)
+ return ret;
+
+ gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
+
+#ifdef CONFIG_PM
+ ret = platform_driver_register(&gsmi_driver_info);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "gsmi: unable to register platform driver\n");
+ return ret;
+ }
+#endif
+
+ /* register device */
+ gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
+ if (IS_ERR(gsmi_dev.pdev)) {
+ printk(KERN_ERR "gsmi: unable to register platform device\n");
+ return PTR_ERR(gsmi_dev.pdev);
+ }
+
+ /* SMI access needs to be serialized */
+ spin_lock_init(&gsmi_dev.lock);
+
+ ret = -ENOMEM;
+ gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
+ GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
+ if (!gsmi_dev.dma_pool)
+ goto out_err;
+
+ /*
+ * pre-allocate buffers because sometimes we are called when
+ * this is not feasible: oops, panic, die, mce, etc
+ */
+ gsmi_dev.name_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.name_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
+ goto out_err;
+ }
+
+ gsmi_dev.data_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.data_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate data buffer\n");
+ goto out_err;
+ }
+
+ gsmi_dev.param_buf = gsmi_buf_alloc();
+ if (!gsmi_dev.param_buf) {
+ printk(KERN_ERR "gsmi: failed to allocate param buffer\n");
+ goto out_err;
+ }
+
+ /*
+ * Determine type of handshake used to serialize the SMI
+ * entry. See also gsmi_exec().
+ *
+ * There's a "behavior" present on some chipsets where writing the
+ * SMI trigger register in the southbridge doesn't result in an
+ * immediate SMI. Rather, the processor can execute "a few" more
+ * instructions before the SMI takes effect. To ensure synchronous
+ * behavior, implement a handshake between the kernel driver and the
+	 * firmware handler to spin until released. The GSMI_CMD_HANDSHAKE_TYPE
+	 * command reports which type of handshake the firmware expects.
+ *
+ * NONE: The firmware handler does not implement any
+ * handshake. Either it doesn't need to, or it's legacy firmware
+ * that doesn't know it needs to and never will.
+ *
+ * CF: The firmware handler will clear the CF in the saved
+ * state before returning. The driver may set the CF and test for
+ * it to clear before proceeding.
+ *
+ * SPIN: The firmware handler does not implement any handshake
+ * but the driver should spin for a hundred or so microseconds
+ * to ensure the SMI has triggered.
+ *
+ * Finally, the handler will return -ENOSYS if
+ * GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies
+ * HANDSHAKE_NONE.
+ */
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+ gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN;
+ gsmi_dev.handshake_type =
+ gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE);
+ if (gsmi_dev.handshake_type == -ENOSYS)
+ gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE;
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+
+ /* Remove and clean up gsmi if the handshake could not complete. */
+ if (gsmi_dev.handshake_type == -ENXIO) {
+ printk(KERN_INFO "gsmi version " DRIVER_VERSION
+ " failed to load\n");
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+ /* Register in the firmware directory */
+ ret = -ENOMEM;
+ gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
+ if (!gsmi_kobj) {
+ printk(KERN_INFO "gsmi: Failed to create firmware kobj\n");
+ goto out_err;
+ }
+
+ /* Setup eventlog access */
+ ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to setup eventlog");
+ goto out_err;
+ }
+
+ /* Other attributes */
+ ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to add attrs");
+ goto out_remove_bin_file;
+ }
+
+#ifdef CONFIG_EFI
+ ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
+ if (ret) {
+ printk(KERN_INFO "gsmi: Failed to register efivars\n");
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ goto out_remove_bin_file;
+ }
+#endif
+
+ register_reboot_notifier(&gsmi_reboot_notifier);
+ register_die_notifier(&gsmi_die_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &gsmi_panic_notifier);
+
+ printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
+
+ return 0;
+
+out_remove_bin_file:
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+out_err:
+ kobject_put(gsmi_kobj);
+ gsmi_buf_free(gsmi_dev.param_buf);
+ gsmi_buf_free(gsmi_dev.data_buf);
+ gsmi_buf_free(gsmi_dev.name_buf);
+ dma_pool_destroy(gsmi_dev.dma_pool);
+ platform_device_unregister(gsmi_dev.pdev);
+ pr_info("gsmi: failed to load: %d\n", ret);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
+ return ret;
+}
+
+static void __exit gsmi_exit(void)
+{
+ unregister_reboot_notifier(&gsmi_reboot_notifier);
+ unregister_die_notifier(&gsmi_die_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &gsmi_panic_notifier);
+#ifdef CONFIG_EFI
+ efivars_unregister(&efivars);
+#endif
+
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+ kobject_put(gsmi_kobj);
+ gsmi_buf_free(gsmi_dev.param_buf);
+ gsmi_buf_free(gsmi_dev.data_buf);
+ gsmi_buf_free(gsmi_dev.name_buf);
+ dma_pool_destroy(gsmi_dev.dma_pool);
+ platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+ platform_driver_unregister(&gsmi_driver_info);
+#endif
+}
+
+module_init(gsmi_init);
+module_exit(gsmi_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
new file mode 100644
index 000000000..d17e4d6ac
--- /dev/null
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole-coreboot.c
+ *
+ * Memory based BIOS console accessed through coreboot table.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+#include "coreboot_table.h"
+
+#define CB_TAG_CBMEM_CONSOLE 0x17
+
+/* CBMEM firmware console log descriptor. */
+struct cbmem_cons {
+ u32 size_dont_access_after_boot;
+ u32 cursor;
+ u8 body[];
+} __packed;
+
+#define CURSOR_MASK ((1 << 28) - 1)
+#define OVERFLOW (1 << 31)
+
+static struct cbmem_cons *cbmem_console;
+static u32 cbmem_console_size;
+
+/*
+ * The cbmem_console structure is read again on every access because it may
+ * change at any time if runtime firmware logs new messages. This may rarely
+ * lead to race conditions where the firmware overwrites the beginning of the
+ * ring buffer with more lines after we have already read |cursor|. It should be
+ * rare and harmless enough that we don't spend extra effort working around it.
+ */
+static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
+{
+ u32 cursor = cbmem_console->cursor & CURSOR_MASK;
+ u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
+ u32 size = cbmem_console_size;
+ struct seg { /* describes ring buffer segments in logical order */
+ u32 phys; /* physical offset from start of mem buffer */
+ u32 len; /* length of segment */
+ } seg[2] = { {0}, {0} };
+ size_t done = 0;
+ int i;
+
+ if (flags & OVERFLOW) {
+ if (cursor > size) /* Shouldn't really happen, but... */
+ cursor = 0;
+ seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
+ seg[1] = (struct seg){.phys = 0, .len = cursor};
+ } else {
+ seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
+ done += memory_read_from_buffer(buf + done, count - done, &pos,
+ cbmem_console->body + seg[i].phys, seg[i].len);
+ pos -= seg[i].len;
+ }
+ return done;
+}
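When the OVERFLOW flag is set the ring has wrapped, so the oldest data begins at the cursor and continues from the start of the buffer; the read path therefore copies up to two segments in logical order. A user-space sketch of the same linearisation over a throwaway eight-byte buffer:

/* User-space sketch of the two-segment linearisation performed by
 * memconsole_coreboot_read(); buffer contents and sizes are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CURSOR_MASK ((1u << 28) - 1)
#define OVERFLOW    (1u << 31)

static void linearize(const char *body, uint32_t size, uint32_t raw_cursor,
		      char *out)
{
	uint32_t cursor = raw_cursor & CURSOR_MASK;
	uint32_t flags  = raw_cursor & ~CURSOR_MASK;

	if (flags & OVERFLOW) {
		if (cursor > size)	/* shouldn't happen; same guard */
			cursor = 0;
		memcpy(out, body + cursor, size - cursor);	/* oldest part */
		memcpy(out + size - cursor, body, cursor);	/* newest part */
	} else {
		memcpy(out, body, cursor < size ? cursor : size);
	}
}

int main(void)
{
	char body[8] = { 'F', 'G', 'H', 'A', 'B', 'C', 'D', 'E' };
	char out[9] = { 0 };

	linearize(body, sizeof(body), OVERFLOW | 3, out);
	printf("%s\n", out);	/* prints ABCDEFGH */
	return 0;
}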
+
+static int memconsole_probe(struct coreboot_device *dev)
+{
+ struct cbmem_cons *tmp_cbmc;
+
+ tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
+ sizeof(*tmp_cbmc), MEMREMAP_WB);
+
+ if (!tmp_cbmc)
+ return -ENOMEM;
+
+ /* Read size only once to prevent overrun attack through /dev/mem. */
+ cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
+ cbmem_console = devm_memremap(&dev->dev, dev->cbmem_ref.cbmem_addr,
+ cbmem_console_size + sizeof(*cbmem_console),
+ MEMREMAP_WB);
+ memunmap(tmp_cbmc);
+
+ if (IS_ERR(cbmem_console))
+ return PTR_ERR(cbmem_console);
+
+ memconsole_setup(memconsole_coreboot_read);
+
+ return memconsole_sysfs_init();
+}
+
+static int memconsole_remove(struct coreboot_device *dev)
+{
+ memconsole_exit();
+
+ return 0;
+}
+
+static struct coreboot_driver memconsole_driver = {
+ .probe = memconsole_probe,
+ .remove = memconsole_remove,
+ .drv = {
+ .name = "memconsole",
+ },
+ .tag = CB_TAG_CBMEM_CONSOLE,
+};
+module_coreboot_driver(memconsole_driver);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
new file mode 100644
index 000000000..3d3c4f6b8
--- /dev/null
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole-x86-legacy.c
+ *
+ * EBDA specific parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/mm.h>
+#include <asm/bios_ebda.h>
+#include <linux/acpi.h>
+
+#include "memconsole.h"
+
+#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
+#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
+
+struct biosmemcon_ebda {
+ u32 signature;
+ union {
+ struct {
+ u8 enabled;
+ u32 buffer_addr;
+ u16 start;
+ u16 end;
+ u16 num_chars;
+ u8 wrapped;
+ } __packed v1;
+ struct {
+ u32 buffer_addr;
+ /* Misdocumented as number of pages! */
+ u16 num_bytes;
+ u16 start;
+ u16 end;
+ } __packed v2;
+ };
+} __packed;
+
+static char *memconsole_baseaddr;
+static size_t memconsole_length;
+
+static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+ memconsole_length);
+}
+
+static void found_v1_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num = %d\n",
+ hdr->v1.buffer_addr, hdr->v1.start,
+ hdr->v1.end, hdr->v1.num_chars);
+
+ memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+ memconsole_length = hdr->v1.num_chars;
+ memconsole_setup(memconsole_read);
+}
+
+static void found_v2_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v2 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num_bytes = %d\n",
+ hdr->v2.buffer_addr, hdr->v2.start,
+ hdr->v2.end, hdr->v2.num_bytes);
+
+ memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
+ memconsole_length = hdr->v2.end - hdr->v2.start;
+ memconsole_setup(memconsole_read);
+}
+
+/*
+ * Search through the EBDA for the BIOS Memory Console, and
+ * set the global variables to point to it. Return true if found.
+ */
+static bool memconsole_ebda_init(void)
+{
+ unsigned int address;
+ size_t length, cur;
+
+ address = get_bios_ebda();
+ if (!address) {
+ pr_info("memconsole: BIOS EBDA non-existent.\n");
+ return false;
+ }
+
+ /* EBDA length is byte 0 of EBDA (in KB) */
+ length = *(u8 *)phys_to_virt(address);
+ length <<= 10; /* convert to bytes */
+
+ /*
+ * Search through EBDA for BIOS memory console structure
+ * note: signature is not necessarily dword-aligned
+ */
+ for (cur = 0; cur < length; cur++) {
+ struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
+
+ /* memconsole v1 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
+ found_v1_header(hdr);
+ return true;
+ }
+
+ /* memconsole v2 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
+ found_v2_header(hdr);
+ return true;
+ }
+ }
+
+ pr_info("memconsole: BIOS console EBDA structure not found!\n");
+ return false;
+}
+
+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
+ {
+ .ident = "Google Board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
+
+static bool __init memconsole_find(void)
+{
+ if (!dmi_check_system(memconsole_dmi_table))
+ return false;
+
+ return memconsole_ebda_init();
+}
+
+static int __init memconsole_x86_init(void)
+{
+ if (!memconsole_find())
+ return -ENODEV;
+
+ return memconsole_sysfs_init();
+}
+
+static void __exit memconsole_x86_exit(void)
+{
+ memconsole_exit();
+}
+
+module_init(memconsole_x86_init);
+module_exit(memconsole_x86_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
new file mode 100644
index 000000000..44d314ad6
--- /dev/null
+++ b/drivers/firmware/google/memconsole.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * memconsole.c
+ *
+ * Architecture-independent parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
+#include "memconsole.h"
+
+static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
+
+ memconsole_read_func = bin_attr->private;
+ if (WARN_ON_ONCE(!memconsole_read_func))
+ return -EIO;
+
+ return memconsole_read_func(buf, pos, count);
+}
+
+static struct bin_attribute memconsole_bin_attr = {
+ .attr = {.name = "log", .mode = 0444},
+ .read = memconsole_read,
+};
+
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
+{
+ memconsole_bin_attr.private = read_func;
+}
+EXPORT_SYMBOL(memconsole_setup);
+
+int memconsole_sysfs_init(void)
+{
+ return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_sysfs_init);
+
+void memconsole_exit(void)
+{
+ sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
+}
+EXPORT_SYMBOL(memconsole_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
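Whichever backend locates the console, the result is a single read-only binary attribute, so the log can be dumped from userspace simply by reading /sys/firmware/log. A minimal sketch:

/* User-space sketch: stream the exported firmware console to stdout. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/log", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}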
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
new file mode 100644
index 000000000..aaff2b72b
--- /dev/null
+++ b/drivers/firmware/google/memconsole.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * memconsole.h
+ *
+ * Internal headers of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
+#define __FIRMWARE_GOOGLE_MEMCONSOLE_H
+
+#include <linux/types.h>
+
+/*
+ * memconsole_setup
+ *
+ * Initialize the memory console, passing the function to handle read accesses.
+ */
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t));
+
+/*
+ * memconsole_sysfs_init
+ *
+ * Create the binary sysfs file on the firmware object that exports
+ * the memory console to userspace.
+ */
+int memconsole_sysfs_init(void);
+
+/* memconsole_exit
+ *
+ * Unmap the console buffer.
+ */
+void memconsole_exit(void);
+
+#endif /* __FIRMWARE_GOOGLE_MEMCONSOLE_H */
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
new file mode 100644
index 000000000..d23c5c69a
--- /dev/null
+++ b/drivers/firmware/google/vpd.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpd.c
+ *
+ * Driver for exporting VPD content to sysfs.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "coreboot_table.h"
+#include "vpd_decode.h"
+
+#define CB_TAG_VPD 0x2c
+#define VPD_CBMEM_MAGIC 0x43524f53
+
+static struct kobject *vpd_kobj;
+
+struct vpd_cbmem {
+ u32 magic;
+ u32 version;
+ u32 ro_size;
+ u32 rw_size;
+ u8 blob[];
+};
+
+struct vpd_section {
+ bool enabled;
+ const char *name;
+ char *raw_name; /* the string name_raw */
+ struct kobject *kobj; /* vpd/name directory */
+ char *baseaddr;
+ struct bin_attribute bin_attr; /* vpd/name_raw bin_attribute */
+ struct list_head attribs; /* key/value in vpd_attrib_info list */
+};
+
+struct vpd_attrib_info {
+ char *key;
+ const char *value;
+ struct bin_attribute bin_attr;
+ struct list_head list;
+};
+
+static struct vpd_section ro_vpd;
+static struct vpd_section rw_vpd;
+
+static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_attrib_info *info = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, info->value,
+ info->bin_attr.size);
+}
+
+/*
+ * vpd_section_check_key_name()
+ *
+ * The VPD specification supports only [a-zA-Z0-9_]+ characters in key names,
+ * but old firmware versions may have entries like "S/N" which are problematic
+ * when exported as sysfs attributes. Such keys from old firmware are ignored.
+ *
+ * Returns VPD_OK for a valid key name, VPD_FAIL otherwise.
+ *
+ * @key: The key name to check
+ * @key_len: key name length
+ */
+static int vpd_section_check_key_name(const u8 *key, s32 key_len)
+{
+ int c;
+
+ while (key_len-- > 0) {
+ c = *key++;
+
+ if (!isalnum(c) && c != '_')
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
+
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
+ void *arg)
+{
+ int ret;
+ struct vpd_section *sec = arg;
+ struct vpd_attrib_info *info;
+
+ /*
+ * Return VPD_OK immediately to decode next entry if the current key
+ * name contains invalid characters.
+ */
+ if (vpd_section_check_key_name(key, key_len) != VPD_OK)
+ return VPD_OK;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->key = kstrndup(key, key_len, GFP_KERNEL);
+ if (!info->key) {
+ ret = -ENOMEM;
+ goto free_info;
+ }
+
+ sysfs_bin_attr_init(&info->bin_attr);
+ info->bin_attr.attr.name = info->key;
+ info->bin_attr.attr.mode = 0444;
+ info->bin_attr.size = value_len;
+ info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.private = info;
+
+ info->value = value;
+
+ INIT_LIST_HEAD(&info->list);
+
+ ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
+ if (ret)
+ goto free_info_key;
+
+ list_add_tail(&info->list, &sec->attribs);
+ return 0;
+
+free_info_key:
+ kfree(info->key);
+free_info:
+ kfree(info);
+
+ return ret;
+}
+
+static void vpd_section_attrib_destroy(struct vpd_section *sec)
+{
+ struct vpd_attrib_info *info;
+ struct vpd_attrib_info *temp;
+
+ list_for_each_entry_safe(info, temp, &sec->attribs, list) {
+ sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+ kfree(info->key);
+ kfree(info);
+ }
+}
+
+static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_section *sec = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, sec->baseaddr,
+ sec->bin_attr.size);
+}
+
+static int vpd_section_create_attribs(struct vpd_section *sec)
+{
+ s32 consumed;
+ int ret;
+
+ consumed = 0;
+ do {
+ ret = vpd_decode_string(sec->bin_attr.size, sec->baseaddr,
+ &consumed, vpd_section_attrib_add, sec);
+ } while (ret == VPD_OK);
+
+ return 0;
+}
+
+static int vpd_section_init(const char *name, struct vpd_section *sec,
+ phys_addr_t physaddr, size_t size)
+{
+ int err;
+
+ sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
+ if (!sec->baseaddr)
+ return -ENOMEM;
+
+ sec->name = name;
+
+ /* We want to export the raw partition with name ${name}_raw */
+ sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
+ if (!sec->raw_name) {
+ err = -ENOMEM;
+ goto err_memunmap;
+ }
+
+ sysfs_bin_attr_init(&sec->bin_attr);
+ sec->bin_attr.attr.name = sec->raw_name;
+ sec->bin_attr.attr.mode = 0444;
+ sec->bin_attr.size = size;
+ sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.private = sec;
+
+ err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+ if (err)
+ goto err_free_raw_name;
+
+ sec->kobj = kobject_create_and_add(name, vpd_kobj);
+ if (!sec->kobj) {
+ err = -EINVAL;
+ goto err_sysfs_remove;
+ }
+
+ INIT_LIST_HEAD(&sec->attribs);
+ vpd_section_create_attribs(sec);
+
+ sec->enabled = true;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+err_free_raw_name:
+ kfree(sec->raw_name);
+err_memunmap:
+ memunmap(sec->baseaddr);
+ return err;
+}
+
+static int vpd_section_destroy(struct vpd_section *sec)
+{
+ if (sec->enabled) {
+ vpd_section_attrib_destroy(sec);
+ kobject_put(sec->kobj);
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+ kfree(sec->raw_name);
+ memunmap(sec->baseaddr);
+ sec->enabled = false;
+ }
+
+ return 0;
+}
+
+static int vpd_sections_init(phys_addr_t physaddr)
+{
+ struct vpd_cbmem *temp;
+ struct vpd_cbmem header;
+ int ret = 0;
+
+ temp = memremap(physaddr, sizeof(struct vpd_cbmem), MEMREMAP_WB);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(&header, temp, sizeof(struct vpd_cbmem));
+ memunmap(temp);
+
+ if (header.magic != VPD_CBMEM_MAGIC)
+ return -ENODEV;
+
+ if (header.ro_size) {
+ ret = vpd_section_init("ro", &ro_vpd,
+ physaddr + sizeof(struct vpd_cbmem),
+ header.ro_size);
+ if (ret)
+ return ret;
+ }
+
+ if (header.rw_size) {
+ ret = vpd_section_init("rw", &rw_vpd,
+ physaddr + sizeof(struct vpd_cbmem) +
+ header.ro_size, header.rw_size);
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vpd_probe(struct coreboot_device *dev)
+{
+ int ret;
+
+ vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+ if (!vpd_kobj)
+ return -ENOMEM;
+
+ ret = vpd_sections_init(dev->cbmem_ref.cbmem_addr);
+ if (ret) {
+ kobject_put(vpd_kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vpd_remove(struct coreboot_device *dev)
+{
+ vpd_section_destroy(&ro_vpd);
+ vpd_section_destroy(&rw_vpd);
+
+ kobject_put(vpd_kobj);
+
+ return 0;
+}
+
+static struct coreboot_driver vpd_driver = {
+ .probe = vpd_probe,
+ .remove = vpd_remove,
+ .drv = {
+ .name = "vpd",
+ },
+ .tag = CB_TAG_VPD,
+};
+module_coreboot_driver(vpd_driver);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
new file mode 100644
index 000000000..5c6f2a74f
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpd_decode.c
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#include "vpd_decode.h"
+
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+ u32 *length, u32 *decoded_len)
+{
+ u8 more;
+ int i = 0;
+
+ if (!length || !decoded_len)
+ return VPD_FAIL;
+
+ *length = 0;
+ do {
+ if (i >= max_len)
+ return VPD_FAIL;
+
+ more = in[i] & 0x80;
+ *length <<= 7;
+ *length |= in[i] & 0x7f;
+ ++i;
+ } while (more);
+
+ *decoded_len = i;
+ return VPD_OK;
+}
+
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+ u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+ u32 decoded_len;
+ u32 consumed = *_consumed;
+
+ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+ entry_len, &decoded_len) != VPD_OK)
+ return VPD_FAIL;
+ if (max_len - consumed < decoded_len)
+ return VPD_FAIL;
+
+ consumed += decoded_len;
+ *entry = input_buf + consumed;
+
+ /* entry_len is untrusted data and must be checked again. */
+ if (max_len - consumed < *entry_len)
+ return VPD_FAIL;
+
+ consumed += *entry_len;
+ *_consumed = consumed;
+ return VPD_OK;
+}
+
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg)
+{
+ int type;
+ u32 key_len;
+ u32 value_len;
+ const u8 *key;
+ const u8 *value;
+
+ /* type */
+ if (*consumed >= max_len)
+ return VPD_FAIL;
+
+ type = input_buf[*consumed];
+
+ switch (type) {
+ case VPD_TYPE_INFO:
+ case VPD_TYPE_STRING:
+ (*consumed)++;
+
+ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+ &key_len) != VPD_OK)
+ return VPD_FAIL;
+
+ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+ &value_len) != VPD_OK)
+ return VPD_FAIL;
+
+ if (type == VPD_TYPE_STRING)
+ return callback(key, key_len, value, value_len,
+ callback_arg);
+ break;
+
+ default:
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
new file mode 100644
index 000000000..8dbe41cac
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vpd_decode.h
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ */
+
+#ifndef __VPD_DECODE_H
+#define __VPD_DECODE_H
+
+#include <linux/types.h>
+
+enum {
+ VPD_OK = 0,
+ VPD_FAIL,
+};
+
+enum {
+ VPD_TYPE_TERMINATOR = 0,
+ VPD_TYPE_STRING,
+ VPD_TYPE_INFO = 0xfe,
+ VPD_TYPE_IMPLICIT_TERMINATOR = 0xff,
+};
+
+/* Callback for vpd_decode_string to invoke. */
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+ const u8 *value, u32 value_len,
+ void *arg);
+
+/*
+ * vpd_decode_string
+ *
+ * Given the encoded string, this function invokes callback with the extracted
+ * (key, value) pair. The number of bytes consumed by this function is added
+ * to *consumed.
+ *
+ * The input_buf points to the first byte of the input buffer.
+ *
+ * *consumed starts at 0 and indexes the next byte to be decoded. It can be
+ * non-zero so the same buffer can be decoded over multiple calls.
+ *
+ * If one entry is successfully decoded, sends it to callback and returns the
+ * result.
+ */
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg);
+
+#endif /* __VPD_DECODE_H */
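For reference, a minimal sketch (not part of this patch) of how a caller might drive vpd_decode_string(): the length fields are encoded as 7-bit chunks with the top bit set on every byte except the last, so the bytes 0x81 0x02 decode to (1 << 7) | 2 = 130. The callback and wrapper names below are hypothetical; only vpd_decode_string() and the vpd_decode_callback signature come from this header, and pr_info() is assumed from <linux/printk.h>.

    /* Hypothetical callback: print each decoded (key, value) pair. */
    static int vpd_dump_entry(const u8 *key, u32 key_len,
                              const u8 *value, u32 value_len, void *arg)
    {
            pr_info("vpd: %.*s = %.*s\n", (int)key_len, (const char *)key,
                    (int)value_len, (const char *)value);
            return VPD_OK;
    }

    static void vpd_dump_all(const u8 *buf, u32 len)
    {
            u32 consumed = 0;

            /* Keep decoding until the buffer is exhausted or malformed. */
            while (vpd_decode_string(len, buf, &consumed,
                                     vpd_dump_entry, NULL) == VPD_OK)
                    ;
    }

This is the same pattern vpd_section_create_attribs() uses above: the consumed cursor is shared across calls, and decoding stops on the first VPD_FAIL.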
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
new file mode 100644
index 000000000..c027d99f2
--- /dev/null
+++ b/drivers/firmware/imx/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config IMX_DSP
+ tristate "IMX DSP Protocol driver"
+ depends on IMX_MBOX
+ help
+ This enables the DSP IPC protocol between the host AP (Linux)
+ and the firmware running on the DSP.
+ A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP).
+
+ It acts as a doorbell. Clients may use shared memory to
+ exchange information with the DSP side.
+
+config IMX_SCU
+ bool "IMX SCU Protocol driver"
+ depends on IMX_MBOX
+ select SOC_BUS
+ help
+ The System Controller Firmware (SCFW) is a low-level system function
+ which runs on a dedicated Cortex-M core to provide power, clock, and
+ resource management. It exists on some i.MX8 processors, e.g. i.MX8QM
+ (QM, QP) and i.MX8QX (QXP, DX).
+
+ This driver manages the IPC interface between the host CPU and the
+ SCU firmware running on the M4.
+
+config IMX_SCU_PD
+ bool "IMX SCU Power Domain driver"
+ depends on IMX_SCU
+ help
+ Power domain driver based on the System Controller Firmware (SCFW).
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
new file mode 100644
index 000000000..b76acbade
--- /dev/null
+++ b/drivers/firmware/imx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IMX_DSP) += imx-dsp.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
new file mode 100644
index 000000000..4265e9dbe
--- /dev/null
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ *
+ * Implementation of the DSP IPC interface (host side)
+ */
+
+#include <linux/firmware/imx/dsp.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
+ *
+ * @ipc: DSP IPC handle
+ * @idx: index of the channel on which to trigger the interrupt
+ *
+ * Returns non-negative value for success, negative value for error
+ */
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
+{
+ int ret;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return -EINVAL;
+
+ dsp_chan = &ipc->chans[idx];
+ ret = mbox_send_message(dsp_chan->ch, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_dsp_ring_doorbell);
+
+/*
+ * imx_dsp_handle_rx - rx callback used by imx mailbox
+ *
+ * @c: mbox client
+ * @msg: message received
+ *
+ * Users of DSP IPC will need to provide handle_reply and handle_request
+ * callbacks.
+ */
+static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
+{
+ struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
+
+ if (chan->idx == 0) {
+ chan->ipc->ops->handle_reply(chan->ipc);
+ } else {
+ chan->ipc->ops->handle_request(chan->ipc);
+ imx_dsp_ring_doorbell(chan->ipc, 1);
+ }
+}
+
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ struct imx_dsp_chan *dsp_chan;
+ struct mbox_client *cl;
+ char *chan_name;
+ int ret;
+ int i, j;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ if (i < 2)
+ chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
+ else
+ chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+
+ dsp_chan = &dsp_ipc->chans[i];
+ cl = &dsp_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_dsp_handle_rx;
+
+ dsp_chan->ipc = dsp_ipc;
+ dsp_chan->idx = i % 2;
+ dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(dsp_chan->ch)) {
+ ret = PTR_ERR(dsp_chan->ch);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ chan_name, ret);
+ goto out;
+ }
+
+ dev_dbg(dev, "request mbox chan %s\n", chan_name);
+ /* chan_name is not used anymore by framework */
+ kfree(chan_name);
+ }
+
+ dsp_ipc->dev = dev;
+
+ dev_set_drvdata(dev, dsp_ipc);
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return 0;
+out:
+ kfree(chan_name);
+ for (j = 0; j < i; j++) {
+ dsp_chan = &dsp_ipc->chans[j];
+ mbox_free_channel(dsp_chan->ch);
+ }
+
+ return ret;
+}
+
+static int imx_dsp_remove(struct platform_device *pdev)
+{
+ struct imx_dsp_chan *dsp_chan;
+ struct imx_dsp_ipc *dsp_ipc;
+ int i;
+
+ dsp_ipc = dev_get_drvdata(&pdev->dev);
+
+ for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
+ dsp_chan = &dsp_ipc->chans[i];
+ mbox_free_channel(dsp_chan->ch);
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_dsp_driver = {
+ .driver = {
+ .name = "imx-dsp",
+ },
+ .probe = imx_dsp_probe,
+ .remove = imx_dsp_remove,
+};
+builtin_platform_driver(imx_dsp_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
+MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
+MODULE_LICENSE("GPL v2");
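As a rough illustration of the client side (not part of this patch), a DSP protocol driver hooks its reply/request handlers into the imx_dsp_ipc exposed by this platform device and then uses imx_dsp_ring_doorbell() to notify the DSP after a message has been placed in shared memory. The direct ops assignment below assumes the imx_dsp_ops/ipc->ops layout from <linux/firmware/imx/dsp.h>; the handler bodies and function names are placeholders.

    static void my_handle_reply(struct imx_dsp_ipc *ipc)
    {
            /* Read the reply out of shared memory and complete the waiter. */
    }

    static void my_handle_request(struct imx_dsp_ipc *ipc)
    {
            /* Handle an unsolicited request coming from the DSP. */
    }

    static struct imx_dsp_ops my_dsp_ops = {
            .handle_reply   = my_handle_reply,
            .handle_request = my_handle_request,
    };

    /* After copying a message into shared memory, ring txdb0 (channel 0). */
    static int my_send_msg(struct imx_dsp_ipc *ipc)
    {
            ipc->ops = &my_dsp_ops;  /* normally wired up once at probe time */
            return imx_dsp_ring_doorbell(ipc, 0);
    }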
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 000000000..d9dcc2094
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/mailbox_client.h>
+#include <linux/suspend.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE 1
+#define IMX_SC_IRQ_FUNC_STATUS 2
+#define IMX_SC_IRQ_NUM_GROUP 4
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ u8 group;
+ u8 reserved;
+ } __packed req;
+ struct {
+ u32 status;
+ } resp;
+ } data;
+};
+
+struct imx_sc_msg_irq_enable {
+ struct imx_sc_rpc_msg hdr;
+ u32 mask;
+ u16 resource;
+ u8 group;
+ u8 enable;
+} __packed;
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+ return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
+ status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+ struct imx_sc_msg_irq_get_status msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ u32 irq_status;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+ hdr->size = 2;
+
+ msg.data.req.resource = mu_resource_id;
+ msg.data.req.group = i;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("get irq group %d status failed, ret %d\n",
+ i, ret);
+ return;
+ }
+
+ irq_status = msg.data.resp.status;
+ if (!irq_status)
+ continue;
+
+ pm_system_wakeup();
+ imx_scu_irq_notifier_call_chain(irq_status, &i);
+ }
+}
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ struct imx_sc_msg_irq_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ if (!imx_sc_irq_ipc_handle)
+ return -EPROBE_DEFER;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = mu_resource_id;
+ msg.group = group;
+ msg.mask = mask;
+ msg.enable = enable;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+ group, mask, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+ schedule_work(&imx_sc_irq_work);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ struct of_phandle_args spec;
+ struct mbox_client *cl;
+ struct mbox_chan *ch;
+ int ret = 0, i = 0;
+
+ ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+ if (ret)
+ return ret;
+
+ cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = dev;
+ cl->rx_callback = imx_scu_irq_callback;
+
+ /* SCU general IRQ uses general interrupt channel 3 */
+ ch = mbox_request_channel_byname(cl, "gip3");
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+ devm_kfree(dev, cl);
+ return ret;
+ }
+
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec))
+ i = of_alias_get_id(spec.np, "mu");
+
+ /* use MU1 as the general MU IRQ channel if the lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
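A sketch of how a consumer of this file typically uses it (for illustration only): register an atomic notifier for SCU wakeup interrupts, then enable the group/mask of interest through imx_scu_irq_group_enable(). The group and mask values below are placeholders, and <linux/notifier.h>, <linux/bits.h> and <linux/printk.h> are assumed for NOTIFY_OK, BIT() and pr_info().

    static int my_scu_irq_notify(struct notifier_block *nb,
                                 unsigned long status, void *group)
    {
            u8 grp = *(u8 *)group;

            pr_info("SCU IRQ: group %u status 0x%lx\n", grp, status);
            return NOTIFY_OK;
    }

    static struct notifier_block my_scu_irq_nb = {
            .notifier_call = my_scu_irq_notify,
    };

    static int my_enable_scu_wakeup(void)
    {
            int ret;

            ret = imx_scu_irq_register_notifier(&my_scu_irq_nb);
            if (ret)
                    return ret;

            /* Group 0, bit 0 are placeholder values for this sketch. */
            return imx_scu_irq_group_enable(0, BIT(0), true);
    }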
diff --git a/drivers/firmware/imx/imx-scu-soc.c b/drivers/firmware/imx/imx-scu-soc.c
new file mode 100644
index 000000000..2f32353de
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-soc.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
+
+struct imx_sc_msg_misc_get_soc_id {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u32 control;
+ u16 resource;
+ } __packed req;
+ struct {
+ u32 id;
+ } resp;
+ } data;
+} __packed __aligned(4);
+
+struct imx_sc_msg_misc_get_soc_uid {
+ struct imx_sc_rpc_msg hdr;
+ u32 uid_low;
+ u32 uid_high;
+} __packed;
+
+static int imx_scu_soc_uid(u64 *soc_uid)
+{
+ struct imx_sc_msg_misc_get_soc_uid msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
+ hdr->size = 1;
+
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ *soc_uid = msg.uid_high;
+ *soc_uid <<= 32;
+ *soc_uid |= msg.uid_low;
+
+ return 0;
+}
+
+static int imx_scu_soc_id(void)
+{
+ struct imx_sc_msg_misc_get_soc_id msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.data.req.control = IMX_SC_C_ID;
+ msg.data.req.resource = IMX_SC_R_SYSTEM;
+
+ ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
+ return ret;
+ }
+
+ return msg.data.resp.id;
+}
+
+int imx_scu_soc_init(struct device *dev)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ int id, ret;
+ u64 uid = 0;
+ u32 val;
+
+ ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle);
+ if (ret)
+ return ret;
+
+ soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ ret = of_property_read_string(of_root,
+ "model",
+ &soc_dev_attr->machine);
+ if (ret)
+ return ret;
+
+ id = imx_scu_soc_id();
+ if (id < 0)
+ return -EINVAL;
+
+ ret = imx_scu_soc_uid(&uid);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* format soc_id value passed from SCU firmware */
+ val = id & 0x1f;
+ soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ /* format revision value passed from SCU firmware */
+ val = (id >> 5) & 0xf;
+ val = (((val >> 2) + 1) << 4) | (val & 0x3);
+ soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (val >> 4) & 0xf, val & 0xf);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL,
+ "%016llX", uid);
+ if (!soc_dev_attr->serial_number)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
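To make the bit manipulation above concrete, here is a worked example with an invented id value: bits [4:0] of the raw word are the SoC id and bits [8:5] encode the revision, from which the major/minor digits are derived.

    /*
     * Worked example (id value invented for illustration):
     *   id       = 0x2B
     *   soc_id   = id & 0x1f                              = 0x0B
     *   rev bits = (id >> 5) & 0xf                        = 0x1
     *   encoded  = (((0x1 >> 2) + 1) << 4) | (0x1 & 0x3)  = 0x11
     *   revision = "1.1"  (major = encoded >> 4, minor = encoded & 0xf)
     */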
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
new file mode 100644
index 000000000..dca79cacc
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * Implementation of the SCU IPC functions using MUs (client side).
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#define SCU_MU_CHAN_NUM 8
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
+
+struct imx_sc_chan {
+ struct imx_sc_ipc *sc_ipc;
+
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ int idx;
+ struct completion tx_done;
+};
+
+struct imx_sc_ipc {
+ /* SCU uses 4 Tx and 4 Rx channels */
+ struct imx_sc_chan chans[SCU_MU_CHAN_NUM];
+ struct device *dev;
+ struct mutex lock;
+ struct completion done;
+ bool fast_ipc;
+
+ /* temporarily store the SCU msg */
+ u32 *msg;
+ u8 rx_size;
+ u8 count;
+};
+
+/*
+ * This type is used to indicate error response for most functions.
+ */
+enum imx_sc_error_codes {
+ IMX_SC_ERR_NONE = 0, /* Success */
+ IMX_SC_ERR_VERSION = 1, /* Incompatible API version */
+ IMX_SC_ERR_CONFIG = 2, /* Configuration error */
+ IMX_SC_ERR_PARM = 3, /* Bad parameter */
+ IMX_SC_ERR_NOACCESS = 4, /* Permission error (no access) */
+ IMX_SC_ERR_LOCKED = 5, /* Permission error (locked) */
+ IMX_SC_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ IMX_SC_ERR_NOTFOUND = 7, /* Not found */
+ IMX_SC_ERR_NOPOWER = 8, /* No power */
+ IMX_SC_ERR_IPC = 9, /* Generic IPC error */
+ IMX_SC_ERR_BUSY = 10, /* Resource is currently busy/active */
+ IMX_SC_ERR_FAIL = 11, /* General I/O failure */
+ IMX_SC_ERR_LAST
+};
+
+static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
+ 0, /* IMX_SC_ERR_NONE */
+ -EINVAL, /* IMX_SC_ERR_VERSION */
+ -EINVAL, /* IMX_SC_ERR_CONFIG */
+ -EINVAL, /* IMX_SC_ERR_PARM */
+ -EACCES, /* IMX_SC_ERR_NOACCESS */
+ -EACCES, /* IMX_SC_ERR_LOCKED */
+ -ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
+ -EEXIST, /* IMX_SC_ERR_NOTFOUND */
+ -EPERM, /* IMX_SC_ERR_NOPOWER */
+ -EPIPE, /* IMX_SC_ERR_IPC */
+ -EBUSY, /* IMX_SC_ERR_BUSY */
+ -EIO, /* IMX_SC_ERR_FAIL */
+};
+
+static struct imx_sc_ipc *imx_sc_ipc_handle;
+
+static inline int imx_sc_to_linux_errno(int errno)
+{
+ if (errno >= IMX_SC_ERR_NONE && errno < IMX_SC_ERR_LAST)
+ return imx_sc_linux_errmap[errno];
+ return -EIO;
+}
+
+/*
+ * Get the default handle used by SCU
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ if (!imx_sc_ipc_handle)
+ return -EPROBE_DEFER;
+
+ *ipc = imx_sc_ipc_handle;
+ return 0;
+}
+EXPORT_SYMBOL(imx_scu_get_handle);
+
+/* Callback called when a word of a message is acknowledged, i.e. read by the SCU */
+static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
+
+ complete(&sc_chan->tx_done);
+}
+
+static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
+{
+ struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
+ struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
+ struct imx_sc_rpc_msg *hdr;
+ u32 *data = msg;
+ int i;
+
+ if (!sc_ipc->msg) {
+ dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
+ sc_chan->idx, *data);
+ return;
+ }
+
+ if (sc_ipc->fast_ipc) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ sc_ipc->msg[0] = *data++;
+
+ for (i = 1; i < sc_ipc->rx_size; i++)
+ sc_ipc->msg[i] = *data++;
+
+ complete(&sc_ipc->done);
+
+ return;
+ }
+
+ if (sc_chan->idx == 0) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ dev_dbg(sc_ipc->dev, "msg rx size %u\n", sc_ipc->rx_size);
+ if (sc_ipc->rx_size > 4)
+ dev_warn(sc_ipc->dev, "RPC does not support receiving over 4 words: %u\n",
+ sc_ipc->rx_size);
+ }
+
+ sc_ipc->msg[sc_chan->idx] = *data;
+ sc_ipc->count++;
+
+ dev_dbg(sc_ipc->dev, "mu %u msg %u 0x%x\n", sc_chan->idx,
+ sc_ipc->count, *data);
+
+ if ((sc_ipc->rx_size != 0) && (sc_ipc->count == sc_ipc->rx_size))
+ complete(&sc_ipc->done);
+}
+
+static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
+{
+ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
+ struct imx_sc_chan *sc_chan;
+ u32 *data = msg;
+ int ret;
+ int size;
+ int i;
+
+ /* Check size */
+ if (hdr.size > IMX_SC_RPC_MAX_MSG)
+ return -EINVAL;
+
+ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
+ hdr.func, hdr.size);
+
+ size = sc_ipc->fast_ipc ? 1 : hdr.size;
+ for (i = 0; i < size; i++) {
+ sc_chan = &sc_ipc->chans[i % 4];
+
+ /*
+ * SCU requires that all message words are written
+ * sequentially, but the Linux MU driver implements multiple
+ * independent channels (one per register), so ordering between
+ * different channels must be ensured by the SCU API interface.
+ *
+ * Wait for tx_done before every send to ensure that no
+ * queueing happens at the mailbox channel level.
+ */
+ if (!sc_ipc->fast_ipc) {
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+ }
+
+ ret = mbox_send_message(sc_chan->ch, &data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * RPC command/response
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
+{
+ uint8_t saved_svc, saved_func;
+ struct imx_sc_rpc_msg *hdr;
+ int ret;
+
+ if (WARN_ON(!sc_ipc || !msg))
+ return -EINVAL;
+
+ mutex_lock(&sc_ipc->lock);
+ reinit_completion(&sc_ipc->done);
+
+ if (have_resp) {
+ sc_ipc->msg = msg;
+ saved_svc = ((struct imx_sc_rpc_msg *)msg)->svc;
+ saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
+ }
+ sc_ipc->count = 0;
+ ret = imx_scu_ipc_write(sc_ipc, msg);
+ if (ret < 0) {
+ dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (have_resp) {
+ if (!wait_for_completion_timeout(&sc_ipc->done,
+ MAX_RX_TIMEOUT)) {
+ dev_err(sc_ipc->dev, "RPC send msg timeout\n");
+ mutex_unlock(&sc_ipc->lock);
+ return -ETIMEDOUT;
+ }
+
+ /* response status is stored in hdr->func field */
+ hdr = msg;
+ ret = hdr->func;
+ /*
+ * Some special SCU firmware APIs do NOT have return value
+ * in hdr->func, but they do have response data, those special
+ * APIs are defined as void function in SCU firmware, so they
+ * should be treated as return success always.
+ */
+ if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
+ (saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
+ saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
+ ret = 0;
+ }
+
+out:
+ sc_ipc->msg = NULL;
+ mutex_unlock(&sc_ipc->lock);
+
+ dev_dbg(sc_ipc->dev, "RPC SVC done\n");
+
+ return imx_sc_to_linux_errno(ret);
+}
+EXPORT_SYMBOL(imx_scu_call_rpc);
+
+static int imx_scu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_sc_ipc *sc_ipc;
+ struct imx_sc_chan *sc_chan;
+ struct mbox_client *cl;
+ char *chan_name;
+ struct of_phandle_args args;
+ int num_channel;
+ int ret;
+ int i;
+
+ sc_ipc = devm_kzalloc(dev, sizeof(*sc_ipc), GFP_KERNEL);
+ if (!sc_ipc)
+ return -ENOMEM;
+
+ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+ "#mbox-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+
+ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ for (i = 0; i < num_channel; i++) {
+ if (i < num_channel / 2)
+ chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
+ else
+ chan_name = kasprintf(GFP_KERNEL, "rx%d",
+ i - num_channel / 2);
+
+ if (!chan_name)
+ return -ENOMEM;
+
+ sc_chan = &sc_ipc->chans[i];
+ cl = &sc_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->rx_callback = imx_scu_rx_callback;
+
+ if (!sc_ipc->fast_ipc) {
+ /* Initialize the tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+ }
+
+ sc_chan->sc_ipc = sc_ipc;
+ sc_chan->idx = i % (num_channel / 2);
+ sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
+ if (IS_ERR(sc_chan->ch)) {
+ ret = PTR_ERR(sc_chan->ch);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request mbox chan %s ret %d\n",
+ chan_name, ret);
+ kfree(chan_name);
+ return ret;
+ }
+
+ dev_dbg(dev, "request mbox chan %s\n", chan_name);
+ /* chan_name is not used anymore by framework */
+ kfree(chan_name);
+ }
+
+ sc_ipc->dev = dev;
+ mutex_init(&sc_ipc->lock);
+ init_completion(&sc_ipc->done);
+
+ imx_sc_ipc_handle = sc_ipc;
+
+ ret = imx_scu_soc_init(dev);
+ if (ret)
+ dev_warn(dev, "failed to initialize SoC info: %d\n", ret);
+
+ ret = imx_scu_enable_general_irq_channel(dev);
+ if (ret)
+ dev_warn(dev,
+ "failed to enable general irq channel: %d\n", ret);
+
+ dev_info(dev, "NXP i.MX SCU Initialized\n");
+
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id imx_scu_match[] = {
+ { .compatible = "fsl,imx-scu", },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver imx_scu_driver = {
+ .driver = {
+ .name = "imx-scu",
+ .of_match_table = imx_scu_match,
+ },
+ .probe = imx_scu_probe,
+};
+builtin_platform_driver(imx_scu_driver);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
+MODULE_LICENSE("GPL v2");
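For orientation, a minimal sketch of an SCU RPC from a client's point of view (this mirrors what the service files below do; the message layout and function names here are invented for illustration): get the global handle, fill in an imx_sc_rpc_msg header whose size field counts the message in 32-bit words, and let imx_scu_call_rpc() translate the firmware status returned in hdr->func into a Linux error code.

    /* Hypothetical RPC: one u32 argument, response payload ignored. */
    struct my_sc_msg {
            struct imx_sc_rpc_msg hdr;
            u32 arg;
    } __packed __aligned(4);

    static int my_scu_rpc(u8 svc, u8 func, u32 arg)
    {
            struct imx_sc_ipc *ipc;
            struct my_sc_msg msg;
            int ret;

            ret = imx_scu_get_handle(&ipc);
            if (ret)        /* -EPROBE_DEFER until the SCU driver is up */
                    return ret;

            msg.hdr.ver  = IMX_SC_RPC_VERSION;
            msg.hdr.svc  = svc;
            msg.hdr.func = func;
            msg.hdr.size = 2;       /* header word + one argument word */
            msg.arg = arg;

            /* true: wait for the SCU response; errors come back as -errno */
            return imx_scu_call_rpc(ipc, &msg, true);
    }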
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
new file mode 100644
index 000000000..d073cb3ce
--- /dev/null
+++ b/drivers/firmware/imx/misc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * File containing client-side RPC functions for the MISC service. These
+ * functions are ported to clients that communicate with the SC.
+ *
+ */
+
+#include <linux/firmware/imx/svc/misc.h>
+
+struct imx_sc_msg_req_misc_set_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 ctrl;
+ u32 val;
+ u16 resource;
+} __packed __aligned(4);
+
+struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+ u32 address_hi;
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+} __packed __aligned(4);
+
+struct imx_sc_msg_req_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 ctrl;
+ u16 resource;
+} __packed __aligned(4);
+
+struct imx_sc_msg_resp_misc_get_ctrl {
+ struct imx_sc_rpc_msg hdr;
+ u32 val;
+} __packed __aligned(4);
+
+/*
+ * This function sets a miscellaneous control value.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] ctrl control to change
+ * @param[in] val value to apply to the control
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val)
+{
+ struct imx_sc_msg_req_misc_set_ctrl msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+ hdr->func = (uint8_t)IMX_SC_MISC_FUNC_SET_CONTROL;
+ hdr->size = 4;
+
+ msg.ctrl = ctrl;
+ msg.val = val;
+ msg.resource = resource;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_misc_set_control);
+
+/*
+ * This function gets a miscellaneous control value.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] ctrl control to get
+ * @param[out] val pointer to return the control value
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val)
+{
+ struct imx_sc_msg_req_misc_get_ctrl msg;
+ struct imx_sc_msg_resp_misc_get_ctrl *resp;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
+ hdr->func = (uint8_t)IMX_SC_MISC_FUNC_GET_CONTROL;
+ hdr->size = 3;
+
+ msg.ctrl = ctrl;
+ msg.resource = resource;
+
+ ret = imx_scu_call_rpc(ipc, &msg, true);
+ if (ret)
+ return ret;
+
+ resp = (struct imx_sc_msg_resp_misc_get_ctrl *)&msg;
+ if (val != NULL)
+ *val = resp->val;
+
+ return 0;
+}
+EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] enable true for start, false for stop
+ * @param[in] phys_addr initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ struct imx_sc_msg_req_cpu_start msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CPU_START;
+ hdr->size = 4;
+
+ msg.address_hi = phys_addr >> 32;
+ msg.address_lo = phys_addr;
+ msg.resource = resource;
+ msg.enable = enable;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
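For completeness, a consumer of these helpers would look roughly like the sketch below; the function name is hypothetical and the resource/control arguments are placeholders (real callers pass IMX_SC_R_* / IMX_SC_C_* values from the dt-bindings headers).

    static int my_toggle_control(u32 resource, u8 ctrl)
    {
            struct imx_sc_ipc *ipc;
            u32 val;
            int ret;

            ret = imx_scu_get_handle(&ipc);
            if (ret)
                    return ret;

            ret = imx_sc_misc_get_control(ipc, resource, ctrl, &val);
            if (ret)
                    return ret;

            /* Write back the inverse of whatever the SCU reported. */
            return imx_sc_misc_set_control(ipc, resource, ctrl, !val);
    }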
diff --git a/drivers/firmware/imx/rm.c b/drivers/firmware/imx/rm.c
new file mode 100644
index 000000000..a12db6ff3
--- /dev/null
+++ b/drivers/firmware/imx/rm.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 NXP
+ *
+ * File containing client-side RPC functions for the RM service. These
+ * functions are ported to clients that communicate with the SC.
+ */
+
+#include <linux/firmware/imx/svc/rm.h>
+
+struct imx_sc_msg_rm_rsrc_owned {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+} __packed __aligned(4);
+
+/*
+ * This function checks whether @resource is owned by the current partition.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ *
+ * @return Returns 0 for not owned and 1 for owned.
+ */
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ struct imx_sc_msg_rm_rsrc_owned msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_RM;
+ hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED;
+ hdr->size = 2;
+
+ msg.resource = resource;
+
+ /*
+ * The SCU firmware only returns 0 or 1 for the resource-owned
+ * check, meaning not owned or owned respectively, so the call
+ * always succeeds.
+ */
+ imx_scu_call_rpc(ipc, &msg, true);
+
+ return hdr->func;
+}
+EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
new file mode 100644
index 000000000..946eea292
--- /dev/null
+++ b/drivers/firmware/imx/scu-pd.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ * Implementation of the SCU based Power Domains
+ *
+ * NOTE: a better implementation suggested by Ulf Hansson is using a
+ * single global power domain and implement the ->attach|detach_dev()
+ * callback for the genpd and use the regular of_genpd_add_provider_simple().
+ * From within the ->attach_dev(), we could get the OF node for
+ * the device that is being attached and then parse the power-domain
+ * cell containing the "resource id" and store that in the per device
+ * struct generic_pm_domain_data (we have void pointer there for
+ * storing these kind of things).
+ *
+ * Additionally, we need to implement the ->stop() and ->start()
+ * callbacks of genpd, which is where you "power on/off" devices,
+ * rather than using the above ->power_on|off() callbacks.
+ *
+ * However, there are two known issues:
+ * 1. The ->attach_dev() of the power domain infrastructure still does
+ * not support the multi-domain case: the struct device *dev passed
+ * in is a virtual PD device, so it does not help with parsing the real
+ * device resource id from the device tree, and it is unaware of which
+ * real sub power domain the device should be attached to.
+ *
+ * The framework needs some proper extension to support multi power
+ * domain cases.
+ *
+ * 2. It also breaks most current drivers, as the driver probe sequence
+ * behavior changes if the ->power_on|off() callbacks are removed and
+ * ->start() and ->stop() are used instead. genpd_dev_pm_attach will only
+ * power up the domain and attach the device, but will not call .start(),
+ * which relies on device runtime pm. That means the device power is still
+ * not up before the driver probe function runs. For SCU enabled
+ * platforms, all device drivers accessing registers/clocks without the
+ * power domain enabled will trigger a HW access error. That means we need
+ * to fix the probe sequence of most drivers with proper runtime pm.
+ *
+ * In summary, we need to fix the above two issues before being able to
+ * switch to the "single global power domain" approach.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/firmware/imx/svc/rm.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+/* SCU Power Mode Protocol definition */
+struct imx_sc_msg_req_set_resource_power_mode {
+ struct imx_sc_rpc_msg hdr;
+ u16 resource;
+ u8 mode;
+} __packed __aligned(4);
+
+#define IMX_SCU_PD_NAME_SIZE 20
+struct imx_sc_pm_domain {
+ struct generic_pm_domain pd;
+ char name[IMX_SCU_PD_NAME_SIZE];
+ u32 rsrc;
+};
+
+struct imx_sc_pd_range {
+ char *name;
+ u32 rsrc;
+ u8 num;
+
+ /* add domain index */
+ bool postfix;
+ u8 start_from;
+};
+
+struct imx_sc_pd_soc {
+ const struct imx_sc_pd_range *pd_ranges;
+ u8 num_ranges;
+};
+
+static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
+ /* LSIO SS */
+ { "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
+ { "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
+ { "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
+ { "kpp", IMX_SC_R_KPP, 1, false, 0 },
+ { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
+ { "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
+ { "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
+
+ /* CONN SS */
+ { "usb", IMX_SC_R_USB_0, 2, true, 0 },
+ { "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
+ { "usb2", IMX_SC_R_USB_2, 1, false, 0 },
+ { "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
+ { "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
+ { "enet", IMX_SC_R_ENET_0, 2, true, 0 },
+ { "nand", IMX_SC_R_NAND, 1, false, 0 },
+ { "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
+
+ /* AUDIO SS */
+ { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
+ { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
+ { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
+ { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
+ { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
+ { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+ { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
+ { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
+ { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
+ { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
+ { "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
+ { "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
+ { "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
+ { "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
+ { "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
+ { "amix", IMX_SC_R_AMIX, 1, false, 0 },
+ { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
+ { "dsp", IMX_SC_R_DSP, 1, false, 0 },
+ { "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
+
+ /* DMA SS */
+ { "can", IMX_SC_R_CAN_0, 3, true, 0 },
+ { "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
+ { "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
+ { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
+ { "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
+ { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+ { "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
+
+ /* VPU SS */
+ { "vpu", IMX_SC_R_VPU, 1, false, 0 },
+ { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
+ { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
+ { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
+
+ /* GPU SS */
+ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
+
+ /* HSIO SS */
+ { "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
+ { "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
+ { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
+
+ /* MIPI SS */
+ { "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
+ { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
+ { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
+
+ /* LVDS SS */
+ { "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+
+ /* DC SS */
+ { "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+ { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+
+ /* CM40 SS */
+ { "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
+ { "cm40-intmux", IMX_SC_R_M4_0_INTMUX, 1, false, 0 },
+ { "cm40-pid", IMX_SC_R_M4_0_PID0, 5, true, 0},
+ { "cm40-mu-a1", IMX_SC_R_M4_0_MU_1A, 1, false, 0},
+ { "cm40-lpuart", IMX_SC_R_M4_0_UART, 1, false, 0},
+
+ /* CM41 SS */
+ { "cm41-i2c", IMX_SC_R_M4_1_I2C, 1, false, 0 },
+ { "cm41-intmux", IMX_SC_R_M4_1_INTMUX, 1, false, 0 },
+ { "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
+ { "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
+ { "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
+};
+
+static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
+ .pd_ranges = imx8qxp_scu_pd_ranges,
+ .num_ranges = ARRAY_SIZE(imx8qxp_scu_pd_ranges),
+};
+
+static struct imx_sc_ipc *pm_ipc_handle;
+
+static inline struct imx_sc_pm_domain *
+to_imx_sc_pd(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx_sc_pm_domain, pd);
+}
+
+static int imx_sc_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+ struct imx_sc_msg_req_set_resource_power_mode msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ struct imx_sc_pm_domain *pd;
+ int ret;
+
+ pd = to_imx_sc_pd(domain);
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE;
+ hdr->size = 2;
+
+ msg.resource = pd->rsrc;
+ msg.mode = power_on ? IMX_SC_PM_PW_MODE_ON : IMX_SC_PM_PW_MODE_LP;
+
+ ret = imx_scu_call_rpc(pm_ipc_handle, &msg, true);
+ if (ret)
+ dev_err(&domain->dev, "failed to power %s resource %d ret %d\n",
+ power_on ? "up" : "off", pd->rsrc, ret);
+
+ return ret;
+}
+
+static int imx_sc_pd_power_on(struct generic_pm_domain *domain)
+{
+ return imx_sc_pd_power(domain, true);
+}
+
+static int imx_sc_pd_power_off(struct generic_pm_domain *domain)
+{
+ return imx_sc_pd_power(domain, false);
+}
+
+static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec,
+ void *data)
+{
+ struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+ struct genpd_onecell_data *pd_data = data;
+ unsigned int i;
+
+ for (i = 0; i < pd_data->num_domains; i++) {
+ struct imx_sc_pm_domain *sc_pd;
+
+ sc_pd = to_imx_sc_pd(pd_data->domains[i]);
+ if (sc_pd->rsrc == spec->args[0]) {
+ domain = &sc_pd->pd;
+ break;
+ }
+ }
+
+ return domain;
+}
+
+static struct imx_sc_pm_domain *
+imx_scu_add_pm_domain(struct device *dev, int idx,
+ const struct imx_sc_pd_range *pd_ranges)
+{
+ struct imx_sc_pm_domain *sc_pd;
+ int ret;
+
+ if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
+ return NULL;
+
+ sc_pd = devm_kzalloc(dev, sizeof(*sc_pd), GFP_KERNEL);
+ if (!sc_pd)
+ return ERR_PTR(-ENOMEM);
+
+ sc_pd->rsrc = pd_ranges->rsrc + idx;
+ sc_pd->pd.power_off = imx_sc_pd_power_off;
+ sc_pd->pd.power_on = imx_sc_pd_power_on;
+
+ if (pd_ranges->postfix)
+ snprintf(sc_pd->name, sizeof(sc_pd->name),
+ "%s%i", pd_ranges->name, pd_ranges->start_from + idx);
+ else
+ snprintf(sc_pd->name, sizeof(sc_pd->name),
+ "%s", pd_ranges->name);
+
+ sc_pd->pd.name = sc_pd->name;
+
+ if (sc_pd->rsrc >= IMX_SC_R_LAST) {
+ dev_warn(dev, "invalid pd %s rsrc id %d found",
+ sc_pd->name, sc_pd->rsrc);
+
+ devm_kfree(dev, sc_pd);
+ return NULL;
+ }
+
+ ret = pm_genpd_init(&sc_pd->pd, NULL, true);
+ if (ret) {
+ dev_warn(dev, "failed to init pd %s rsrc id %d",
+ sc_pd->name, sc_pd->rsrc);
+ devm_kfree(dev, sc_pd);
+ return NULL;
+ }
+
+ return sc_pd;
+}
+
+static int imx_scu_init_pm_domains(struct device *dev,
+ const struct imx_sc_pd_soc *pd_soc)
+{
+ const struct imx_sc_pd_range *pd_ranges = pd_soc->pd_ranges;
+ struct generic_pm_domain **domains;
+ struct genpd_onecell_data *pd_data;
+ struct imx_sc_pm_domain *sc_pd;
+ u32 count = 0;
+ int i, j;
+
+ for (i = 0; i < pd_soc->num_ranges; i++)
+ count += pd_ranges[i].num;
+
+ domains = devm_kcalloc(dev, count, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL);
+ if (!pd_data)
+ return -ENOMEM;
+
+ count = 0;
+ for (i = 0; i < pd_soc->num_ranges; i++) {
+ for (j = 0; j < pd_ranges[i].num; j++) {
+ sc_pd = imx_scu_add_pm_domain(dev, j, &pd_ranges[i]);
+ if (IS_ERR_OR_NULL(sc_pd))
+ continue;
+
+ domains[count++] = &sc_pd->pd;
+ dev_dbg(dev, "added power domain %s\n", sc_pd->pd.name);
+ }
+ }
+
+ pd_data->domains = domains;
+ pd_data->num_domains = count;
+ pd_data->xlate = imx_scu_pd_xlate;
+
+ of_genpd_add_provider_onecell(dev->of_node, pd_data);
+
+ return 0;
+}
+
+static int imx_sc_pd_probe(struct platform_device *pdev)
+{
+ const struct imx_sc_pd_soc *pd_soc;
+ int ret;
+
+ ret = imx_scu_get_handle(&pm_ipc_handle);
+ if (ret)
+ return ret;
+
+ pd_soc = of_device_get_match_data(&pdev->dev);
+ if (!pd_soc)
+ return -ENODEV;
+
+ return imx_scu_init_pm_domains(&pdev->dev, pd_soc);
+}
+
+static const struct of_device_id imx_sc_pd_match[] = {
+ { .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+ { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
+ { /* sentinel */ }
+};
+
+static struct platform_driver imx_sc_pd_driver = {
+ .driver = {
+ .name = "imx-scu-pd",
+ .of_match_table = imx_sc_pd_match,
+ },
+ .probe = imx_sc_pd_probe,
+};
+builtin_platform_driver(imx_sc_pd_driver);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("IMX SCU Power Domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
new file mode 100644
index 000000000..7127a04bc
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2007-2010 Red Hat, Inc.
+ * by Peter Jones <pjones@redhat.com>
+ * Copyright 2008 IBM, Inc.
+ * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Copyright 2008
+ * by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code exposes the iSCSI Boot Format Table to userland via sysfs.
+ *
+ * Changelog:
+ *
+ * 06 Jan 2010 - Peter Jones <pjones@redhat.com>
+ * New changelog entries are in the git log from now on. Not here.
+ *
+ * 14 Mar 2008 - Konrad Rzeszutek <ketuzsezr@darnok.org>
+ * Updated comments and copyrights. (v0.4.9)
+ *
+ * 11 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Converted to using ibft_addr. (v0.4.8)
+ *
+ * 8 Feb 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Combined two functions in one: reserve_ibft_region. (v0.4.7)
+ *
+ * 30 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added logic to handle IPv6 addresses. (v0.4.6)
+ *
+ * 25 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added logic to handle badly not-to-spec iBFT. (v0.4.5)
+ *
+ * 4 Jan 2008 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added __init to function declarations. (v0.4.4)
+ *
+ * 21 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Updated kobject registration, combined unregister functions in one
+ * and code and style cleanup. (v0.4.3)
+ *
+ * 5 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added end-markers to enums and re-organized kobject registration. (v0.4.2)
+ *
+ * 4 Dec 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Created 'device' sysfs link to the NIC and style cleanup. (v0.4.1)
+ *
+ * 28 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added sysfs-ibft documentation, moved 'find_ibft' function to
+ * in its own file and added text attributes for every struct field. (v0.4)
+ *
+ * 21 Nov 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added text attributes emulating OpenFirmware /proc/device-tree naming.
+ * Removed binary /sysfs interface (v0.3)
+ *
+ * 29 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Added functionality in setup.c to reserve iBFT region. (v0.2)
+ *
+ * 27 Aug 2007 - Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * First version exposing iBFT data via a binary /sysfs. (v0.1)
+ */
+
+
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iscsi_ibft.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_boot_sysfs.h>
+
+#define IBFT_ISCSI_VERSION "0.5.0"
+#define IBFT_ISCSI_DATE "2010-Feb-25"
+
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com> and "
+ "Konrad Rzeszutek <ketuzsezr@darnok.org>");
+MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBFT_ISCSI_VERSION);
+
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
+struct ibft_hdr {
+ u8 id;
+ u8 version;
+ u16 length;
+ u8 index;
+ u8 flags;
+} __attribute__((__packed__));
+
+struct ibft_control {
+ struct ibft_hdr hdr;
+ u16 extensions;
+ u16 initiator_off;
+ u16 nic0_off;
+ u16 tgt0_off;
+ u16 nic1_off;
+ u16 tgt1_off;
+ u16 expansion[];
+} __attribute__((__packed__));
+
+struct ibft_initiator {
+ struct ibft_hdr hdr;
+ char isns_server[16];
+ char slp_server[16];
+ char pri_radius_server[16];
+ char sec_radius_server[16];
+ u16 initiator_name_len;
+ u16 initiator_name_off;
+} __attribute__((__packed__));
+
+struct ibft_nic {
+ struct ibft_hdr hdr;
+ char ip_addr[16];
+ u8 subnet_mask_prefix;
+ u8 origin;
+ char gateway[16];
+ char primary_dns[16];
+ char secondary_dns[16];
+ char dhcp[16];
+ u16 vlan;
+ char mac[6];
+ u16 pci_bdf;
+ u16 hostname_len;
+ u16 hostname_off;
+} __attribute__((__packed__));
+
+struct ibft_tgt {
+ struct ibft_hdr hdr;
+ char ip_addr[16];
+ u16 port;
+ char lun[8];
+ u8 chap_type;
+ u8 nic_assoc;
+ u16 tgt_name_len;
+ u16 tgt_name_off;
+ u16 chap_name_len;
+ u16 chap_name_off;
+ u16 chap_secret_len;
+ u16 chap_secret_off;
+ u16 rev_chap_name_len;
+ u16 rev_chap_name_off;
+ u16 rev_chap_secret_len;
+ u16 rev_chap_secret_off;
+} __attribute__((__packed__));
+
+/*
+ * The different kobject types and their names.
+ */
+enum ibft_id {
+ id_reserved = 0, /* We don't support. */
+ id_control = 1, /* Should show up only once and is not exported. */
+ id_initiator = 2,
+ id_nic = 3,
+ id_target = 4,
+ id_extensions = 5, /* We don't support. */
+ id_end_marker,
+};
+
+/*
+ * The kobject and attribute structures.
+ */
+
+struct ibft_kobject {
+ struct acpi_table_ibft *header;
+ union {
+ struct ibft_initiator *initiator;
+ struct ibft_nic *nic;
+ struct ibft_tgt *tgt;
+ struct ibft_hdr *hdr;
+ };
+};
+
+static struct iscsi_boot_kset *boot_kset;
+
+/* fully null address */
+static const char nulls[16];
+
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+ return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
+/*
+ * Helper functions to parse data properly.
+ */
+static ssize_t sprintf_ipaddr(char *buf, u8 *ip)
+{
+ char *str = buf;
+
+ if (ip[0] == 0 && ip[1] == 0 && ip[2] == 0 && ip[3] == 0 &&
+ ip[4] == 0 && ip[5] == 0 && ip[6] == 0 && ip[7] == 0 &&
+ ip[8] == 0 && ip[9] == 0 && ip[10] == 0xff && ip[11] == 0xff) {
+ /*
+ * IPV4
+ */
+ str += sprintf(buf, "%pI4", ip + 12);
+ } else {
+ /*
+ * IPv6
+ */
+ str += sprintf(str, "%pI6", ip);
+ }
+ str += sprintf(str, "\n");
+ return str - buf;
+}
+
+static ssize_t sprintf_string(char *str, int len, char *buf)
+{
+ return sprintf(str, "%.*s\n", len, buf);
+}
+
+/*
+ * Helper function to verify the IBFT header.
+ */
+static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
+{
+ if (hdr->id != id) {
+ printk(KERN_ERR "iBFT error: We expected the %s " \
+ "field header.id to have %d but " \
+ "found %d instead!\n", t, id, hdr->id);
+ return -ENODEV;
+ }
+ if (length && hdr->length != length) {
+ printk(KERN_ERR "iBFT error: We expected the %s " \
+ "field header.length to have %d but " \
+ "found %d instead!\n", t, length, hdr->length);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Routines for parsing the iBFT data to be human readable.
+ */
+static ssize_t ibft_attr_show_initiator(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_initiator *initiator = entry->initiator;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+
+ if (!initiator)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
+ str += sprintf(str, "%d\n", initiator->hdr.index);
+ break;
+ case ISCSI_BOOT_INI_FLAGS:
+ str += sprintf(str, "%d\n", initiator->hdr.flags);
+ break;
+ case ISCSI_BOOT_INI_ISNS_SERVER:
+ str += sprintf_ipaddr(str, initiator->isns_server);
+ break;
+ case ISCSI_BOOT_INI_SLP_SERVER:
+ str += sprintf_ipaddr(str, initiator->slp_server);
+ break;
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+ str += sprintf_ipaddr(str, initiator->pri_radius_server);
+ break;
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+ str += sprintf_ipaddr(str, initiator->sec_radius_server);
+ break;
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ str += sprintf_string(str, initiator->initiator_name_len,
+ (char *)ibft_loc +
+ initiator->initiator_name_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_nic *nic = entry->nic;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+ __be32 val;
+
+ if (!nic)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
+ str += sprintf(str, "%d\n", nic->hdr.index);
+ break;
+ case ISCSI_BOOT_ETH_FLAGS:
+ str += sprintf(str, "%d\n", nic->hdr.flags);
+ break;
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ str += sprintf_ipaddr(str, nic->ip_addr);
+ break;
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ str += sprintf(str, "%pI4", &val);
+ break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ str += sprintf(str, "%d\n", nic->origin);
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ str += sprintf_ipaddr(str, nic->gateway);
+ break;
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
+ str += sprintf_ipaddr(str, nic->primary_dns);
+ break;
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
+ str += sprintf_ipaddr(str, nic->secondary_dns);
+ break;
+ case ISCSI_BOOT_ETH_DHCP:
+ str += sprintf_ipaddr(str, nic->dhcp);
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ str += sprintf(str, "%d\n", nic->vlan);
+ break;
+ case ISCSI_BOOT_ETH_MAC:
+ str += sprintf(str, "%pM\n", nic->mac);
+ break;
+ case ISCSI_BOOT_ETH_HOSTNAME:
+ str += sprintf_string(str, nic->hostname_len,
+ (char *)ibft_loc + nic->hostname_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+};
+
+static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_tgt *tgt = entry->tgt;
+ void *ibft_loc = entry->header;
+ char *str = buf;
+ int i;
+
+ if (!tgt)
+ return 0;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
+ str += sprintf(str, "%d\n", tgt->hdr.index);
+ break;
+ case ISCSI_BOOT_TGT_FLAGS:
+ str += sprintf(str, "%d\n", tgt->hdr.flags);
+ break;
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ str += sprintf_ipaddr(str, tgt->ip_addr);
+ break;
+ case ISCSI_BOOT_TGT_PORT:
+ str += sprintf(str, "%d\n", tgt->port);
+ break;
+ case ISCSI_BOOT_TGT_LUN:
+ for (i = 0; i < 8; i++)
+ str += sprintf(str, "%x", (u8)tgt->lun[i]);
+ str += sprintf(str, "\n");
+ break;
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ str += sprintf(str, "%d\n", tgt->nic_assoc);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ str += sprintf(str, "%d\n", tgt->chap_type);
+ break;
+ case ISCSI_BOOT_TGT_NAME:
+ str += sprintf_string(str, tgt->tgt_name_len,
+ (char *)ibft_loc + tgt->tgt_name_off);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ str += sprintf_string(str, tgt->chap_name_len,
+ (char *)ibft_loc + tgt->chap_name_off);
+ break;
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ str += sprintf_string(str, tgt->chap_secret_len,
+ (char *)ibft_loc + tgt->chap_secret_off);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ str += sprintf_string(str, tgt->rev_chap_name_len,
+ (char *)ibft_loc +
+ tgt->rev_chap_name_off);
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ str += sprintf_string(str, tgt->rev_chap_secret_len,
+ (char *)ibft_loc +
+ tgt->rev_chap_secret_off);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ char *str = buf;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ str += sprintf_string(str, ACPI_NAMESEG_SIZE,
+ entry->header->header.signature);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ str += sprintf_string(str, ACPI_OEM_ID_SIZE,
+ entry->header->header.oem_id);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
+ entry->header->header.oem_table_id);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
+static int __init ibft_check_device(void)
+{
+ int len;
+ u8 *pos;
+ u8 csum = 0;
+
+ len = ibft_addr->header.length;
+
+ /* Sanity checking of iBFT. */
+ if (ibft_addr->header.revision != 1) {
+ printk(KERN_ERR "iBFT module supports only revision 1, " \
+ "while this is %d.\n",
+ ibft_addr->header.revision);
+ return -ENOENT;
+ }
+ for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++)
+ csum += *pos;
+
+ if (csum) {
+ printk(KERN_ERR "iBFT has incorrect checksum (0x%x)!\n", csum);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/*
+ * Helper routiners to check to determine if the entry is valid
+ * in the proper iBFT structure.
+ */
+static umode_t ibft_check_nic_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_nic *nic = entry->nic;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ETH_INDEX:
+ case ISCSI_BOOT_ETH_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_IP_ADDR:
+ if (address_not_null(nic->ip_addr))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ case ISCSI_BOOT_ETH_SUBNET_MASK:
+ if (nic->subnet_mask_prefix)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_ORIGIN:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_GATEWAY:
+ if (address_not_null(nic->gateway))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_PRIMARY_DNS:
+ if (address_not_null(nic->primary_dns))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_SECONDARY_DNS:
+ if (address_not_null(nic->secondary_dns))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_DHCP:
+ if (address_not_null(nic->dhcp))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_VLAN:
+ case ISCSI_BOOT_ETH_MAC:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_ETH_HOSTNAME:
+ if (nic->hostname_off)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_tgt_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_tgt *tgt = entry->tgt;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_TGT_INDEX:
+ case ISCSI_BOOT_TGT_FLAGS:
+ case ISCSI_BOOT_TGT_IP_ADDR:
+ case ISCSI_BOOT_TGT_PORT:
+ case ISCSI_BOOT_TGT_LUN:
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_NAME:
+ if (tgt->tgt_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_CHAP_NAME:
+ case ISCSI_BOOT_TGT_CHAP_SECRET:
+ if (tgt->chap_name_len)
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+ case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+ if (tgt->rev_chap_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_initiator_for(void *data, int type)
+{
+ struct ibft_kobject *entry = data;
+ struct ibft_initiator *init = entry->initiator;
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_INI_INDEX:
+ case ISCSI_BOOT_INI_FLAGS:
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_ISNS_SERVER:
+ if (address_not_null(init->isns_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SLP_SERVER:
+ if (address_not_null(init->slp_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
+ if (address_not_null(init->pri_radius_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
+ if (address_not_null(init->sec_radius_server))
+ rc = S_IRUGO;
+ break;
+ case ISCSI_BOOT_INI_INITIATOR_NAME:
+ if (init->initiator_name_len)
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static umode_t __init ibft_check_acpitbl_for(void *data, int type)
+{
+
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static void ibft_kobj_release(void *data)
+{
+ kfree(data);
+}
+
+/*
+ * Helper function for ibft_register_kobjects.
+ */
+static int __init ibft_create_kobject(struct acpi_table_ibft *header,
+ struct ibft_hdr *hdr)
+{
+ struct iscsi_boot_kobj *boot_kobj = NULL;
+ struct ibft_kobject *ibft_kobj = NULL;
+ struct ibft_nic *nic = (struct ibft_nic *)hdr;
+ struct pci_dev *pci_dev;
+ int rc = 0;
+
+ ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+ if (!ibft_kobj)
+ return -ENOMEM;
+
+ ibft_kobj->header = header;
+ ibft_kobj->hdr = hdr;
+
+ switch (hdr->id) {
+ case id_initiator:
+ rc = ibft_verify_hdr("initiator", hdr, id_initiator,
+ sizeof(*ibft_kobj->initiator));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_initiator,
+ ibft_check_initiator_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_nic:
+ rc = ibft_verify_hdr("ethernet", hdr, id_nic,
+ sizeof(*ibft_kobj->nic));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_nic,
+ ibft_check_nic_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_target:
+ rc = ibft_verify_hdr("target", hdr, id_target,
+ sizeof(*ibft_kobj->tgt));
+ if (rc)
+ break;
+
+ boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
+ ibft_kobj,
+ ibft_attr_show_target,
+ ibft_check_tgt_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ rc = -ENOMEM;
+ goto free_ibft_obj;
+ }
+ break;
+ case id_reserved:
+ case id_control:
+ case id_extensions:
+ /* Fields which we don't support. Ignore them */
+ rc = 1;
+ break;
+ default:
+ printk(KERN_ERR "iBFT has unknown structure type (%d). " \
+ "Report this bug to %.6s!\n", hdr->id,
+ header->header.oem_id);
+ rc = 1;
+ break;
+ }
+
+ if (rc) {
+ /* Skip adding this kobject, but exit with non-fatal error. */
+ rc = 0;
+ goto free_ibft_obj;
+ }
+
+ if (hdr->id == id_nic) {
+ /*
+ * We don't search for the device in other domains than
+ * zero. This is because on x86 platforms the BIOS
+ * executes only devices which are in domain 0. Furthermore, the
+ * iBFT spec doesn't have a domain id field :-(
+ */
+ pci_dev = pci_get_domain_bus_and_slot(0,
+ (nic->pci_bdf & 0xff00) >> 8,
+ (nic->pci_bdf & 0xff));
+ if (pci_dev) {
+ rc = sysfs_create_link(&boot_kobj->kobj,
+ &pci_dev->dev.kobj, "device");
+ pci_dev_put(pci_dev);
+ }
+ }
+ return 0;
+
+free_ibft_obj:
+ kfree(ibft_kobj);
+ return rc;
+}
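
For the id_nic case above, nic->pci_bdf packs the device's PCI location with
the bus number in bits 15:8 and the devfn in bits 7:0, which is exactly what
the two mask-and-shift expressions passed to pci_get_domain_bus_and_slot()
recover. A hypothetical encoding sketch (function name invented, values
arbitrary):

#include <linux/pci.h>	/* PCI_DEVFN() */

/* Encode bus 0x02, device 0x00, function 0x01 the way iBFT stores it. */
static u16 example_ibft_pci_bdf(void)
{
	u8 bus = 0x02;
	u8 devfn = PCI_DEVFN(0x00, 0x01);

	return (bus << 8) | devfn;	/* 0x0201 */
}
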
+
+/*
+ * Scan the IBFT table structure for the NIC and Target fields. When
+ * found add them on the passed-in list. We do not support the other
+ * fields at this point, so they are skipped.
+ */
+static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
+{
+ struct ibft_control *control = NULL;
+ struct iscsi_boot_kobj *boot_kobj;
+ struct ibft_kobject *ibft_kobj;
+ void *ptr, *end;
+ int rc = 0;
+ u16 offset;
+ u16 eot_offset;
+
+ control = (void *)header + sizeof(*header);
+ end = (void *)control + control->hdr.length;
+ eot_offset = (void *)header + header->header.length - (void *)control;
+ rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
+
+ /* iBFT table safety checking */
+ rc |= ((control->hdr.index) ? -ENODEV : 0);
+ rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
+ if (rc) {
+ printk(KERN_ERR "iBFT error: Control header is invalid!\n");
+ return rc;
+ }
+ for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
+ offset = *(u16 *)ptr;
+ if (offset && offset < header->header.length &&
+ offset < eot_offset) {
+ rc = ibft_create_kobject(header,
+ (void *)header + offset);
+ if (rc)
+ break;
+ }
+ }
+ if (rc)
+ return rc;
+
+ ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+ if (!ibft_kobj)
+ return -ENOMEM;
+
+ ibft_kobj->header = header;
+ ibft_kobj->hdr = NULL; /*for ibft_unregister*/
+
+ boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
+ ibft_kobj,
+ ibft_attr_show_acpitbl,
+ ibft_check_acpitbl_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ kfree(ibft_kobj);
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+static void ibft_unregister(void)
+{
+ struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+ struct ibft_kobject *ibft_kobj;
+
+ list_for_each_entry_safe(boot_kobj, tmp_kobj,
+ &boot_kset->kobj_list, list) {
+ ibft_kobj = boot_kobj->data;
+ if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
+ sysfs_remove_link(&boot_kobj->kobj, "device");
+	}
+}
+
+static void ibft_cleanup(void)
+{
+ if (boot_kset) {
+ ibft_unregister();
+ iscsi_boot_destroy_kset(boot_kset);
+ }
+}
+
+static void __exit ibft_exit(void)
+{
+ ibft_cleanup();
+}
+
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+ { "BIFT" }, /* Broadcom iSCSI Offload */
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
+/*
+ * ibft_init() - creates sysfs tree entries for the iBFT data.
+ */
+static int __init ibft_init(void)
+{
+ int rc = 0;
+
+	/*
+	 * On UEFI systems setup_arch()/find_ibft_region() runs before the
+	 * ACPI tables are parsed and only performs the legacy low-memory
+	 * scan, so fall back to looking the table up via ACPI here.
+	 */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
+ if (ibft_addr) {
+ pr_info("iBFT detected.\n");
+
+ rc = ibft_check_device();
+ if (rc)
+ return rc;
+
+ boot_kset = iscsi_boot_create_kset("ibft");
+ if (!boot_kset)
+ return -ENOMEM;
+
+ /* Scan the IBFT for data and register the kobjects. */
+ rc = ibft_register_kobjects(ibft_addr);
+ if (rc)
+ goto out_free;
+ } else
+ printk(KERN_INFO "No iBFT detected.\n");
+
+ return 0;
+
+out_free:
+ ibft_cleanup();
+ return rc;
+}
+
+module_init(ibft_init);
+module_exit(ibft_exit);
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
new file mode 100644
index 000000000..64bb94523
--- /dev/null
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2007-2010 Red Hat, Inc.
+ * by Peter Jones <pjones@redhat.com>
+ * Copyright 2007 IBM, Inc.
+ * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Copyright 2008
+ * by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code finds the iSCSI Boot Format Table.
+ */
+
+#include <linux/memblock.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/iscsi_ibft.h>
+
+#include <asm/mmzone.h>
+
+/*
+ * Physical location of iSCSI Boot Format Table.
+ */
+struct acpi_table_ibft *ibft_addr;
+EXPORT_SYMBOL_GPL(ibft_addr);
+
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ { "iBFT" },
+ { "BIFT" }, /* Broadcom iSCSI Offload */
+};
+
+#define IBFT_SIGN_LEN 4
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+#define VGA_MEM 0xA0000 /* VGA buffer */
+#define VGA_SIZE 0x20000 /* 128kB */
+
+static int __init find_ibft_in_mem(void)
+{
+ unsigned long pos;
+ unsigned int len = 0;
+ void *virt;
+ int i;
+
+ for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
+ /* The table can't be inside the VGA BIOS reserved space,
+ * so skip that area */
+ if (pos == VGA_MEM)
+ pos += VGA_SIZE;
+ virt = isa_bus_to_virt(pos);
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) {
+ if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) ==
+ 0) {
+ unsigned long *addr =
+ (unsigned long *)isa_bus_to_virt(pos + 4);
+ len = *addr;
+ /* if the length of the table extends past 1M,
+ * the table cannot be valid. */
+ if (pos + len <= (IBFT_END-1)) {
+ ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
+ goto done;
+ }
+ }
+ }
+ }
+done:
+ return len;
+}
+/*
+ * Routine used to find the iSCSI Boot Format Table. The logical
+ * kernel address is set in the ibft_addr global variable.
+ */
+unsigned long __init find_ibft_region(unsigned long *sizep)
+{
+ ibft_addr = NULL;
+
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this */
+
+ if (!efi_enabled(EFI_BOOT))
+ find_ibft_in_mem();
+
+ if (ibft_addr) {
+ *sizep = PAGE_ALIGN(ibft_addr->header.length);
+ return (u64)virt_to_phys(ibft_addr);
+ }
+
+ *sizep = 0;
+ return 0;
+}
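
A hedged sketch of how early arch code might consume this (the hook name is an
assumption; it relies on the includes already pulled in by this file): the
returned physical range is reserved so the table is still intact when the
sysfs driver above parses it.

/* Sketch only: reserve the iBFT range reported by the legacy scan. */
static void __init example_reserve_ibft(void)
{
	unsigned long size = 0;
	unsigned long addr = find_ibft_region(&size);

	if (addr && size)
		memblock_reserve(addr, size);	/* keep it away from the page allocator */
}
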
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
new file mode 100644
index 000000000..24945e2da
--- /dev/null
+++ b/drivers/firmware/memmap.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/firmware/memmap.c
+ * Copyright (C) 2008 SUSE LINUX Products GmbH
+ * by Bernhard Walle <bernhard.walle@gmx.de>
+ */
+
+#include <linux/string.h>
+#include <linux/firmware-map.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+/*
+ * Data types ------------------------------------------------------------------
+ */
+
+/*
+ * Firmware map entry. Because firmware memory maps are flat and not
+ * hierarchical, it's ok to organise them in a linked list. No parent
+ * information is necessary as for the resource tree.
+ */
+struct firmware_map_entry {
+ /*
+ * start and end must be u64 rather than resource_size_t, because e820
+ * resources can lie at addresses above 4G.
+ */
+ u64 start; /* start of the memory range */
+ u64 end; /* end of the memory range (incl.) */
+ const char *type; /* type of the memory range */
+ struct list_head list; /* entry for the linked list */
+ struct kobject kobj; /* kobject for each entry */
+};
+
+/*
+ * Forward declarations --------------------------------------------------------
+ */
+static ssize_t memmap_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
+/*
+ * Static data -----------------------------------------------------------------
+ */
+
+struct memmap_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
+};
+
+static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
+static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
+static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
+
+/*
+ * These are default attributes that are added for every memmap entry.
+ */
+static struct attribute *def_attrs[] = {
+ &memmap_start_attr.attr,
+ &memmap_end_attr.attr,
+ &memmap_type_attr.attr,
+ NULL
+};
+
+static const struct sysfs_ops memmap_attr_ops = {
+ .show = memmap_attr_show,
+};
+
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by boot mem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+ if (PageReserved(virt_to_page(entry))) {
+ /*
+ * Remember the storage allocated by bootmem, and reuse it when
+ * the memory is hot-added again. The entry will be added to
+ * map_entries_bootmem here, and deleted from &map_entries in
+ * firmware_map_remove_entry().
+ */
+ spin_lock(&map_entries_bootmem_lock);
+ list_add(&entry->list, &map_entries_bootmem);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ return;
+ }
+
+ kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+ .release = release_firmware_map_entry,
+ .sysfs_ops = &memmap_attr_ops,
+ .default_attrs = def_attrs,
+};
+
+/*
+ * Registration functions ------------------------------------------------------
+ */
+
+/**
+ * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
+ * entry.
+ *
+ * Common implementation of firmware_map_add() and firmware_map_add_early()
+ * which expects a pre-allocated struct firmware_map_entry.
+ *
+ * Return: 0 always
+ */
+static int firmware_map_add_entry(u64 start, u64 end,
+ const char *type,
+ struct firmware_map_entry *entry)
+{
+ BUG_ON(start > end);
+
+ entry->start = start;
+ entry->end = end - 1;
+ entry->type = type;
+ INIT_LIST_HEAD(&entry->list);
+ kobject_init(&entry->kobj, &memmap_ktype);
+
+ spin_lock(&map_entries_lock);
+ list_add_tail(&entry->list, &map_entries);
+ spin_unlock(&map_entries_lock);
+
+ return 0;
+}
+
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ */
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+ list_del(&entry->list);
+}
+
+/*
+ * Add memmap entry on sysfs
+ */
+static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ static int map_entries_nr;
+ static struct kset *mmap_kset;
+
+ if (entry->kobj.state_in_sysfs)
+ return -EEXIST;
+
+ if (!mmap_kset) {
+ mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
+ if (!mmap_kset)
+ return -ENOMEM;
+ }
+
+ entry->kobj.kset = mmap_kset;
+ if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
+ kobject_put(&entry->kobj);
+
+ return 0;
+}
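
Each entry registered here shows up as /sys/firmware/memmap/<N>/ with
read-only "start", "end" and "type" attributes; <N> is taken from the running
map_entries_nr counter, so the directory names simply reflect registration
order.
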
+
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ kobject_put(&entry->kobj);
+}
+
+/**
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: List in which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+ struct list_head *list)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, list, list)
+ if ((entry->start == start) && (entry->end == end) &&
+ (!strcmp(entry->type, type))) {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/**
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry() except that it finds the
+ * given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type,
+ &map_entries_bootmem);
+}
+
+/**
+ * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
+ * memory hotplug.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive)
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function is for memory hotplug, it is
+ * similar to function firmware_map_add_early(). The only difference is that
+ * it will create the sysfs entry dynamically.
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (entry)
+ return 0;
+
+ entry = firmware_map_find_entry_bootmem(start, end - 1, type);
+ if (!entry) {
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ } else {
+ /* Reuse storage allocated by bootmem. */
+ spin_lock(&map_entries_bootmem_lock);
+ list_del(&entry->list);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ memset(entry, 0, sizeof(*entry));
+ }
+
+ firmware_map_add_entry(start, end, type, entry);
+ /* create the memmap entry */
+ add_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
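
A hedged caller sketch (the helper name and the "System RAM" label are
illustrative assumptions): a memory hotplug path could publish a newly added
range through this interface, remembering that the end argument is exclusive.

/* Sketch only: expose a hot-added range under /sys/firmware/memmap. */
static int example_report_hotplugged_ram(u64 start, u64 size)
{
	return firmware_map_add_hotplug(start, start + size, "System RAM");
}
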
+
+/**
+ * firmware_map_add_early() - Adds a firmware mapping entry.
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * Adds a firmware mapping entry. This function uses the bootmem allocator
+ * for memory allocation.
+ *
+ * That function must be called before late_initcall.
+ *
+ * Return: 0 on success, or -ENOMEM if no memory could be allocated.
+ */
+int __init firmware_map_add_early(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ entry = memblock_alloc(sizeof(struct firmware_map_entry),
+ SMP_CACHE_BYTES);
+ if (WARN_ON(!entry))
+ return -ENOMEM;
+
+ return firmware_map_add_entry(start, end, type, entry);
+}
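
Correspondingly, a boot-time caller (hypothetical helper, for illustration)
would record firmware-reported ranges while memblock is still available, i.e.
well before late_initcall time.

/* Sketch only: record one firmware-reported range during early boot. */
static void __init example_record_boot_range(u64 start, u64 end, const char *type)
{
	if (firmware_map_add_early(start, end, type))
		pr_warn("firmware map: could not record %s range\n", type);
}
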
+
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * Removes a firmware mapping entry.
+ *
+ * Return: 0 on success, or -EINVAL if no entry.
+ */
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ spin_lock(&map_entries_lock);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (!entry) {
+ spin_unlock(&map_entries_lock);
+ return -EINVAL;
+ }
+
+ firmware_map_remove_entry(entry);
+ spin_unlock(&map_entries_lock);
+
+ /* remove the memmap entry */
+ remove_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
+
+/*
+ * Sysfs functions -------------------------------------------------------------
+ */
+
+static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->start);
+}
+
+static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long)entry->end);
+}
+
+static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
+}
+
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+ return container_of(attr, struct memmap_attribute, attr);
+}
+
+static ssize_t memmap_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+ struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
+
+ return memmap_attr->show(entry, buf);
+}
+
+/*
+ * Initialise the firmware memory map support and add the entries on the
+ * map_entries list to sysfs. Note that firmware_map_add() and
+ * firmware_map_add_early() must be called before late_initcall time:
+ * this function itself runs as a late_initcall(), so entries added
+ * afterwards are never exported to sysfs.
+ */
+static int __init firmware_memmap_init(void)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, &map_entries, list)
+ add_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
+late_initcall(firmware_memmap_init);
+
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 000000000..2671dcd0a
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+ bool
+ default ARCH_MESON
+ depends on ARM64_4K_PAGES
+ help
+ Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 000000000..c6c09483b
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MESON_SM) += meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 000000000..7dff88331
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+ unsigned int index;
+ u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+ unsigned int shmem_size;
+ u32 cmd_shmem_in_base;
+ u32 cmd_shmem_out_base;
+ struct meson_sm_cmd cmd[];
+};
+
+static const struct meson_sm_chip gxbb_chip = {
+ .shmem_size = SZ_4K,
+ .cmd_shmem_in_base = 0x82000020,
+ .cmd_shmem_out_base = 0x82000021,
+ .cmd = {
+ CMD(SM_EFUSE_READ, 0x82000030),
+ CMD(SM_EFUSE_WRITE, 0x82000031),
+ CMD(SM_EFUSE_USER_MAX, 0x82000033),
+ CMD(SM_GET_CHIP_ID, 0x82000044),
+ CMD(SM_A1_PWRC_SET, 0x82000093),
+ CMD(SM_A1_PWRC_GET, 0x82000095),
+ { /* sentinel */ },
+ },
+};
+
+struct meson_sm_firmware {
+ const struct meson_sm_chip *chip;
+ void __iomem *sm_shmem_in_base;
+ void __iomem *sm_shmem_out_base;
+};
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+ unsigned int cmd_index)
+{
+ const struct meson_sm_cmd *cmd = chip->cmd;
+
+ while (cmd->smc_id && cmd->index != cmd_index)
+ cmd++;
+
+ return cmd->smc_id;
+}
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+ return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+ u32 sm_phy_base;
+
+ sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+ if (!sm_phy_base)
+		return NULL;
+
+ return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @cmd_index: Index of the SMC32 function ID
+ * @ret: Returned value
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 cmd, lret;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ cmd = meson_sm_get_cmd(fw->chip, cmd_index);
+ if (!cmd)
+ return -EINVAL;
+
+ lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+ if (ret)
+ *ret = lret;
+
+ return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
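
A minimal consumer sketch, assuming the caller already obtained the handle via
meson_sm_get() (the helper name is made up): SM_EFUSE_USER_MAX takes no
arguments and returns a single value, which makes it a convenient smoke test
for the call path.

/* Sketch only: query the maximum user efuse size via the secure monitor. */
static int example_efuse_user_max(struct meson_sm_firmware *fw)
{
	u32 max = 0;
	int ret;

	ret = meson_sm_call(fw, SM_EFUSE_USER_MAX, &max, 0, 0, 0, 0, 0);
	return ret ? ret : (int)max;
}
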
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @buffer: Buffer to store the retrieved data
+ * @bsize: Size of the buffer
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of read data on success, a negative value on error
+ *         When 0 is returned there is no guarantee about the amount of data
+ *         actually read, and bsize bytes are copied into the buffer.
+ */
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 size;
+ int ret;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ if (!fw->chip->cmd_shmem_out_base)
+ return -EINVAL;
+
+ if (bsize > fw->chip->shmem_size)
+ return -EINVAL;
+
+ if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (size > bsize)
+ return -EINVAL;
+
+ ret = size;
+
+ if (!size)
+ size = bsize;
+
+ if (buffer)
+ memcpy(buffer, fw->sm_shmem_out_base, size);
+
+ return ret;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @fw: Pointer to secure-monitor firmware
+ * @buffer: Buffer containing data to send
+ * @size: Size of the data to send
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 written;
+
+ if (!fw->chip)
+ return -ENOENT;
+
+ if (size > fw->chip->shmem_size)
+ return -EINVAL;
+
+ if (!fw->chip->cmd_shmem_in_base)
+ return -EINVAL;
+
+ memcpy(fw->sm_shmem_in_base, buffer, size);
+
+ if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!written)
+ return -EINVAL;
+
+ return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
+
+/**
+ * meson_sm_get - get pointer to meson_sm_firmware structure.
+ *
+ * @sm_node: Pointer to the secure-monitor Device Tree node.
+ *
+ * Return: NULL if the secure-monitor device is not ready.
+ */
+struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
+{
+ struct platform_device *pdev = of_find_device_by_node(sm_node);
+
+ if (!pdev)
+ return NULL;
+
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(meson_sm_get);
+
+#define SM_CHIP_ID_LENGTH 119
+#define SM_CHIP_ID_OFFSET 4
+#define SM_CHIP_ID_SIZE 12
+
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct meson_sm_firmware *fw;
+ uint8_t *id_buf;
+ int ret;
+
+ fw = platform_get_drvdata(pdev);
+
+ id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
+ if (!id_buf)
+ return -ENOMEM;
+
+ ret = meson_sm_call_read(fw, id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
+ 0, 0, 0, 0, 0);
+ if (ret < 0) {
+ kfree(id_buf);
+ return ret;
+ }
+
+ ret = sprintf(buf, "%12phN\n", &id_buf[SM_CHIP_ID_OFFSET]);
+
+ kfree(id_buf);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(serial);
+
+static struct attribute *meson_sm_sysfs_attributes[] = {
+ &dev_attr_serial.attr,
+ NULL,
+};
+
+static const struct attribute_group meson_sm_sysfs_attr_group = {
+ .attrs = meson_sm_sysfs_attributes,
+};
+
+static const struct of_device_id meson_sm_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+ { /* sentinel */ },
+};
+
+static int __init meson_sm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct meson_sm_chip *chip;
+ struct meson_sm_firmware *fw;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ chip = of_match_device(meson_sm_ids, dev)->data;
+ if (!chip)
+ return -EINVAL;
+
+ if (chip->cmd_shmem_in_base) {
+ fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_in_base))
+ goto out;
+ }
+
+ if (chip->cmd_shmem_out_base) {
+ fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw->sm_shmem_out_base))
+ goto out_in_base;
+ }
+
+ fw->chip = chip;
+
+ platform_set_drvdata(pdev, fw);
+
+ if (devm_of_platform_populate(dev))
+ goto out_in_base;
+
+ if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
+ goto out_in_base;
+
+ pr_info("secure-monitor enabled\n");
+
+ return 0;
+
+out_in_base:
+ iounmap(fw->sm_shmem_in_base);
+out:
+ return -EINVAL;
+}
+
+static struct platform_driver meson_sm_driver = {
+ .driver = {
+ .name = "meson-sm",
+ .of_match_table = of_match_ptr(meson_sm_ids),
+ },
+};
+module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
new file mode 100644
index 000000000..715a45442
--- /dev/null
+++ b/drivers/firmware/pcdp.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Parse the EFI PCDP table to locate the console device.
+ *
+ * (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <asm/vga.h>
+#include "pcdp.h"
+
+static int __init
+setup_serial_console(struct pcdp_uart *uart)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ int mmio;
+ static char options[64], *p = options;
+ char parity;
+
+ mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
+ p += sprintf(p, "uart8250,%s,0x%llx",
+ mmio ? "mmio" : "io", uart->addr.address);
+ if (uart->baud) {
+ p += sprintf(p, ",%llu", uart->baud);
+ if (uart->bits) {
+ switch (uart->parity) {
+ case 0x2: parity = 'e'; break;
+ case 0x3: parity = 'o'; break;
+ default: parity = 'n';
+ }
+ p += sprintf(p, "%c%d", parity, uart->bits);
+ }
+ }
+
+ add_preferred_console("uart", 8250, &options[9]);
+ return setup_earlycon(options);
+#else
+ return -ENODEV;
+#endif
+}
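
As a worked example (the address is made up): for a memory-mapped 16550 at
0xf8030000 running 115200 8n1, the string assembled above would be
"uart8250,mmio,0xf8030000,115200n8". &options[9] skips the leading
"uart8250," so that add_preferred_console() receives only the device options,
while setup_earlycon() gets the full string.
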
+
+static int __init
+setup_vga_console(struct pcdp_device *dev)
+{
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+ u8 *if_ptr;
+
+ if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
+ if (if_ptr[0] == PCDP_IF_PCI) {
+ struct pcdp_if_pci if_pci;
+
+ /* struct copy since ifptr might not be correctly aligned */
+
+ memcpy(&if_pci, if_ptr, sizeof(if_pci));
+
+ if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
+ vga_console_iobase = if_pci.ioport_tra;
+
+ if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
+ vga_console_membase = if_pci.mmio_tra;
+ }
+
+ if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
+ printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
+ return -ENODEV;
+ }
+
+ conswitchp = &vga_con;
+ printk(KERN_INFO "PCDP: VGA console\n");
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+extern unsigned long hcdp_phys;
+
+int __init
+efi_setup_pcdp_console(char *cmdline)
+{
+ struct pcdp *pcdp;
+ struct pcdp_uart *uart;
+ struct pcdp_device *dev, *end;
+ int i, serial = 0;
+ int rc = -ENODEV;
+
+ if (hcdp_phys == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ pcdp = early_memremap(hcdp_phys, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys);
+
+ if (strstr(cmdline, "console=hcdp")) {
+ if (pcdp->rev < 3)
+ serial = 1;
+ } else if (strstr(cmdline, "console=")) {
+ printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
+ goto out;
+ }
+
+ if (pcdp->rev < 3 && efi_uart_console_only())
+ serial = 1;
+
+ for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
+ if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
+ if (uart->type == PCDP_CONSOLE_UART) {
+ rc = setup_serial_console(uart);
+ goto out;
+ }
+ }
+ }
+
+ end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
+ for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
+ dev < end;
+ dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
+ if (dev->flags & PCDP_PRIMARY_CONSOLE) {
+ if (dev->type == PCDP_CONSOLE_VGA) {
+ rc = setup_vga_console(dev);
+ goto out;
+ }
+ }
+ }
+
+out:
+ early_memunmap(pcdp, 4096);
+ return rc;
+}
diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h
new file mode 100644
index 000000000..e02540571
--- /dev/null
+++ b/drivers/firmware/pcdp.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for PCDP-defined console devices
+ *
+ * For DIG64_HCDPv10a_01.pdf and DIG64_PCDPv20.pdf (v1.0a and v2.0 resp.),
+ * please see <http://www.dig64.org/specifications/>
+ *
+ * (c) Copyright 2002, 2004 Hewlett-Packard Development Company, L.P.
+ * Khalid Aziz <khalid.aziz@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#define PCDP_CONSOLE 0
+#define PCDP_DEBUG 1
+#define PCDP_CONSOLE_OUTPUT 2
+#define PCDP_CONSOLE_INPUT 3
+
+#define PCDP_UART (0 << 3)
+#define PCDP_VGA (1 << 3)
+#define PCDP_USB (2 << 3)
+
+/* pcdp_uart.type and pcdp_device.type */
+#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE)
+#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG)
+#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT)
+#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT)
+
+/* pcdp_uart.flags */
+#define PCDP_UART_EDGE_SENSITIVE (1 << 0)
+#define PCDP_UART_ACTIVE_LOW (1 << 1)
+#define PCDP_UART_PRIMARY_CONSOLE (1 << 2)
+#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */
+#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */
+
+struct pcdp_uart {
+ u8 type;
+ u8 bits;
+ u8 parity;
+ u8 stop_bits;
+ u8 pci_seg;
+ u8 pci_bus;
+ u8 pci_dev;
+ u8 pci_func;
+ u64 baud;
+ struct acpi_generic_address addr;
+ u16 pci_dev_id;
+ u16 pci_vendor_id;
+ u32 gsi;
+ u32 clock_rate;
+ u8 pci_prog_intfc;
+ u8 flags;
+ u16 conout_index;
+ u32 reserved;
+} __attribute__((packed));
+
+#define PCDP_IF_PCI 1
+
+/* pcdp_if_pci.trans */
+#define PCDP_PCI_TRANS_IOPORT 0x02
+#define PCDP_PCI_TRANS_MMIO 0x01
+
+struct pcdp_if_pci {
+ u8 interconnect;
+ u8 reserved;
+ u16 length;
+ u8 segment;
+ u8 bus;
+ u8 dev;
+ u8 fun;
+ u16 dev_id;
+ u16 vendor_id;
+ u32 acpi_interrupt;
+ u64 mmio_tra;
+ u64 ioport_tra;
+ u8 flags;
+ u8 trans;
+} __attribute__((packed));
+
+struct pcdp_vga {
+ u8 count; /* address space descriptors */
+} __attribute__((packed));
+
+/* pcdp_device.flags */
+#define PCDP_PRIMARY_CONSOLE 1
+
+struct pcdp_device {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 efi_index;
+ /* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
+ /* next data is device specific type (currently only pcdp_vga) */
+} __attribute__((packed));
+
+struct pcdp {
+ u8 signature[4];
+ u32 length;
+ u8 rev; /* PCDP v2.0 is rev 3 */
+ u8 chksum;
+ u8 oemid[6];
+ u8 oem_tabid[8];
+ u32 oem_rev;
+ u8 creator_id[4];
+ u32 creator_rev;
+ u32 num_uarts;
+ struct pcdp_uart uart[]; /* actual size is num_uarts */
+ /* remainder of table is pcdp_device structures */
+} __attribute__((packed));
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000..97944168b
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_PSCI_FW
+ bool
+
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000..1956b8824
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
new file mode 100644
index 000000000..00af99b6f
--- /dev/null
+++ b/drivers/firmware/psci/psci.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
+#else
+#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name
+#endif
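
For example, on a 64-bit kernel PSCI_FN_NATIVE(0_2, CPU_ON) expands to
PSCI_0_2_FN64_CPU_ON, while a 32-bit build gets PSCI_0_2_FN_CPU_ON, so the
callers below never have to spell out the calling-convention width themselves.
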
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+struct psci_operations psci_ops;
+static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
+
+bool psci_tos_resident_on(int cpu)
+{
+ return cpu == resident_cpu;
+}
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+ PSCI_FN_CPU_SUSPEND,
+ PSCI_FN_CPU_ON,
+ PSCI_FN_CPU_OFF,
+ PSCI_FN_MIGRATE,
+ PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+#define PSCI_0_2_POWER_STATE_MASK \
+ (PSCI_0_2_POWER_STATE_ID_MASK | \
+ PSCI_0_2_POWER_STATE_TYPE_MASK | \
+ PSCI_0_2_POWER_STATE_AFFL_MASK)
+
+#define PSCI_1_0_EXT_POWER_STATE_MASK \
+ (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \
+ PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
+
+static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
+
+static inline bool psci_has_ext_power_state(void)
+{
+ return psci_cpu_suspend_feature &
+ PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
+}
+
+bool psci_has_osi_support(void)
+{
+ return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
+static inline bool psci_power_state_loses_context(u32 state)
+{
+ const u32 mask = psci_has_ext_power_state() ?
+ PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
+ PSCI_0_2_POWER_STATE_TYPE_MASK;
+
+ return state & mask;
+}
+
+bool psci_power_state_is_valid(u32 state)
+{
+ const u32 valid_mask = psci_has_ext_power_state() ?
+ PSCI_1_0_EXT_POWER_STATE_MASK :
+ PSCI_0_2_POWER_STATE_MASK;
+
+ return !(state & ~valid_mask);
+}
+
+static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static int psci_to_linux_errno(int errno)
+{
+ switch (errno) {
+ case PSCI_RET_SUCCESS:
+ return 0;
+ case PSCI_RET_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case PSCI_RET_INVALID_PARAMS:
+ case PSCI_RET_INVALID_ADDRESS:
+ return -EINVAL;
+ case PSCI_RET_DENIED:
+ return -EPERM;
+ };
+
+ return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+int psci_set_osi_mode(bool enable)
+{
+ unsigned long suspend_mode;
+ int err;
+
+ suspend_mode = enable ? PSCI_1_0_SUSPEND_MODE_OSI :
+ PSCI_1_0_SUSPEND_MODE_PC;
+
+ err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+ err = invoke_psci_fn(fn, state, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_OFF];
+ err = invoke_psci_fn(fn, state, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_CPU_ON];
+ err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_MIGRATE];
+ err = invoke_psci_fn(fn, cpuid, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO),
+ target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+ return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+ 0, 0, 0);
+}
+
+static void set_conduit(enum arm_smccc_conduit conduit)
+{
+ switch (conduit) {
+ case SMCCC_CONDUIT_HVC:
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ break;
+ case SMCCC_CONDUIT_SMC:
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ break;
+ default:
+ WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+ }
+
+ psci_conduit = conduit;
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
+ }
+
+ if (!strcmp("hvc", method)) {
+ set_conduit(SMCCC_CONDUIT_HVC);
+ } else if (!strcmp("smc", method)) {
+ set_conduit(SMCCC_CONDUIT_SMC);
+ } else {
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
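
A matching devicetree node typically carries compatible = "arm,psci-1.0" (or
one of the other compatibles matched below) together with method = "smc" or
method = "hvc"; that "method" property is what selects which of the two
conduit callbacks gets installed here. The exact node layout is a common
convention from the PSCI binding, not something this function checks beyond
the property itself.
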
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+ if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+ psci_system_reset2_supported) {
+ /*
+ * reset_type[31] = 0 (architectural)
+ * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+ * cookie = 0 (ignored by the implementation)
+ */
+ invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+ } else {
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ }
+}
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+static int __init psci_features(u32 psci_func_id)
+{
+ return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
+ psci_func_id, 0, 0);
+}
+
+#ifdef CONFIG_CPU_IDLE
+static int psci_suspend_finisher(unsigned long state)
+{
+ u32 power_state = state;
+
+ return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(u32 state)
+{
+ int ret;
+
+ if (!psci_power_state_loses_context(state))
+ ret = psci_ops.cpu_suspend(state, 0);
+ else
+ ret = cpu_suspend(state, psci_suspend_finisher);
+
+ return ret;
+}
+#endif
+
+static int psci_system_suspend(unsigned long unused)
+{
+ return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ __pa_symbol(cpu_resume), 0, 0);
+}
+
+static int psci_system_suspend_enter(suspend_state_t state)
+{
+ return cpu_suspend(0, psci_system_suspend);
+}
+
+static const struct platform_suspend_ops psci_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = psci_system_suspend_enter,
+};
+
+static void __init psci_init_system_reset2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ psci_system_reset2_supported = true;
+}
+
+static void __init psci_init_system_suspend(void)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_SUSPEND))
+ return;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ suspend_set_ops(&psci_suspend_ops);
+}
+
+static void __init psci_init_cpu_suspend(void)
+{
+ int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]);
+
+ if (feature != PSCI_RET_NOT_SUPPORTED)
+ psci_cpu_suspend_feature = feature;
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+ unsigned long cpuid;
+ int type, cpu = -1;
+
+ type = psci_ops.migrate_info_type();
+
+ if (type == PSCI_0_2_TOS_MP) {
+ pr_info("Trusted OS migration not required\n");
+ return;
+ }
+
+ if (type == PSCI_RET_NOT_SUPPORTED) {
+ pr_info("MIGRATE_INFO_TYPE not supported.\n");
+ return;
+ }
+
+ if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ return;
+ }
+
+ cpuid = psci_migrate_info_up_cpu();
+ if (cpuid & ~MPIDR_HWID_BITMASK) {
+ pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+ cpuid);
+ return;
+ }
+
+ cpu = get_logical_index(cpuid);
+ resident_cpu = cpu >= 0 ? cpu : -1;
+
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_init_smccc(void)
+{
+ u32 ver = ARM_SMCCC_VERSION_1_0;
+ int feature;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ u32 ret;
+ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ if (ret >= ARM_SMCCC_VERSION_1_1) {
+ arm_smccc_version_init(ret, psci_conduit);
+ ver = ret;
+ }
+ }
+
+ /*
+ * Conveniently, the SMCCC and PSCI versions are encoded the
+ * same way. No, this isn't accidental.
+ */
+ pr_info("SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
+ psci_function_id[PSCI_FN_CPU_SUSPEND] =
+ PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON);
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE);
+ psci_ops.migrate = psci_migrate;
+
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+ u32 ver = psci_get_version();
+
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
+ }
+
+ psci_0_2_set_functions();
+
+ psci_init_migrate();
+
+ if (PSCI_VERSION_MAJOR(ver) >= 1) {
+ psci_init_smccc();
+ psci_init_cpu_suspend();
+ psci_init_system_suspend();
+ psci_init_system_reset2();
+ }
+
+ return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >=0.2
+ *
+ * Probe based on PSCI PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+ int err;
+
+ err = get_set_conduit_method(np);
+ if (err)
+ return err;
+
+ /*
+ * Starting with v0.2, the PSCI specification introduced a call
+ * (PSCI_VERSION) that allows probing the firmware version, so
+ * that PSCI function IDs and version specific initialization
+ * can be carried out according to the specific version reported
+ * by firmware
+ */
+ return psci_probe();
+}
+
+/*
+ * For PSCI < v0.2, the PSCI function IDs are retrieved from the DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+ if (err)
+ return err;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+ if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+ }
+
+ if (!of_property_read_u32(np, "cpu_off", &id)) {
+ psci_function_id[PSCI_FN_CPU_OFF] = id;
+ psci_ops.cpu_off = psci_cpu_off;
+ }
+
+ if (!of_property_read_u32(np, "cpu_on", &id)) {
+ psci_function_id[PSCI_FN_CPU_ON] = id;
+ psci_ops.cpu_on = psci_cpu_on;
+ }
+
+ if (!of_property_read_u32(np, "migrate", &id)) {
+ psci_function_id[PSCI_FN_MIGRATE] = id;
+ psci_ops.migrate = psci_migrate;
+ }
+
+ return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+ int err;
+
+ err = psci_0_2_init(np);
+ if (err)
+ return err;
+
+ if (psci_has_osi_support()) {
+ pr_info("OSI mode supported.\n");
+
+ /* Default to PC mode. */
+ psci_set_osi_mode(false);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
+ {},
+};
+
+int __init psci_dt_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ ret = init_fn(np);
+
+ of_node_put(np);
+ return ret;
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
+ * required by the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ set_conduit(SMCCC_CONDUIT_HVC);
+ else
+ set_conduit(SMCCC_CONDUIT_SMC);
+
+ return psci_probe();
+}
+#endif
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
new file mode 100644
index 000000000..116eb465c
--- /dev/null
+++ b/drivers/firmware/psci/psci_checker.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+ COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+ COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+ int migrate_type = -1;
+ int cpu;
+
+ if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+ pr_warn("Missing PSCI operations, aborting tests\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (psci_ops.migrate_info_type)
+ migrate_type = psci_ops.migrate_info_type();
+
+ if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+ migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ /* There is a UP Trusted OS, find on which core it resides. */
+ for_each_online_cpu(cpu)
+ if (psci_tos_resident_on(cpu)) {
+ tos_resident_cpu = cpu;
+ break;
+ }
+ if (tos_resident_cpu == -1)
+ pr_warn("UP Trusted OS resides on no online CPU\n");
+ }
+
+ return 0;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+ struct cpumask *offlined_cpus)
+{
+ int cpu;
+ int err = 0;
+
+ cpumask_clear(offlined_cpus);
+
+ /* Try to power down all CPUs in the mask. */
+ for_each_cpu(cpu, cpus) {
+ int ret = remove_cpu(cpu);
+
+ /*
+		 * cpu_down() rejects taking the last online CPU down before
+		 * the TOS resident CPU check can trigger, so classify that
+		 * case first.
+ */
+ if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+ if (ret != -EBUSY) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down last online CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (cpu == tos_resident_cpu) {
+ if (ret != -EPERM) {
+ pr_err("Unexpected return code %d while trying "
+ "to power down TOS resident CPU %d\n",
+ ret, cpu);
+ ++err;
+ }
+ } else if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power down CPU %d\n", ret, cpu);
+ ++err;
+ }
+
+ if (ret == 0)
+ cpumask_set_cpu(cpu, offlined_cpus);
+ }
+
+ /* Try to power up all the CPUs that have been offlined. */
+ for_each_cpu(cpu, offlined_cpus) {
+ int ret = add_cpu(cpu);
+
+ if (ret != 0) {
+ pr_err("Error occurred (%d) while trying "
+ "to power up CPU %d\n", ret, cpu);
+ ++err;
+ } else {
+ cpumask_clear_cpu(cpu, offlined_cpus);
+ }
+ }
+
+ /*
+ * Something went bad at some point and some CPUs could not be turned
+ * back on.
+ */
+ WARN_ON(!cpumask_empty(offlined_cpus) ||
+ num_online_cpus() != nb_available_cpus);
+
+ return err;
+}
+
+static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
+{
+ int i;
+ cpumask_var_t *cpu_groups = *pcpu_groups;
+
+ for (i = 0; i < num; ++i)
+ free_cpumask_var(cpu_groups[i]);
+ kfree(cpu_groups);
+}
+
+static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
+{
+ int num_groups = 0;
+ cpumask_var_t tmp, *cpu_groups;
+
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(tmp, cpu_online_mask);
+
+ while (!cpumask_empty(tmp)) {
+ const struct cpumask *cpu_group =
+ topology_core_cpumask(cpumask_any(tmp));
+
+ if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpumask_var(tmp);
+ free_cpu_groups(num_groups, &cpu_groups);
+ return -ENOMEM;
+ }
+ cpumask_copy(cpu_groups[num_groups++], cpu_group);
+ cpumask_andnot(tmp, tmp, cpu_group);
+ }
+
+ free_cpumask_var(tmp);
+ *pcpu_groups = cpu_groups;
+
+ return num_groups;
+}
+
+static int hotplug_tests(void)
+{
+ int i, nb_cpu_group, err = -ENOMEM;
+ cpumask_var_t offlined_cpus, *cpu_groups;
+ char *page_buf;
+
+ if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+ return err;
+
+ nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
+ if (nb_cpu_group < 0)
+ goto out_free_cpus;
+ page_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!page_buf)
+ goto out_free_cpu_groups;
+
+ /*
+ * Of course the last CPU cannot be powered down and cpu_down() should
+ * refuse doing that.
+ */
+ pr_info("Trying to turn off and on again all CPUs\n");
+ err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+ /*
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
+ */
+ for (i = 0; i < nb_cpu_group; ++i) {
+ ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+ cpu_groups[i]);
+ /* Remove trailing newline. */
+ page_buf[len - 1] = '\0';
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
+ }
+
+ free_page((unsigned long)page_buf);
+out_free_cpu_groups:
+ free_cpu_groups(nb_cpu_group, &cpu_groups);
+out_free_cpus:
+ free_cpumask_var(offlined_cpus);
+ return err;
+}
+
+static void dummy_callback(struct timer_list *unused) {}
+
+static int suspend_cpu(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+ int ret;
+
+ arch_cpu_idle_enter();
+
+ if (broadcast) {
+ /*
+ * The local timer will be shut down, we need to enter tick
+ * broadcast.
+ */
+ ret = tick_broadcast_enter();
+ if (ret) {
+ /*
+			 * In the absence of a hardware broadcast mechanism,
+ * this CPU might be used to broadcast wakeups, which
+ * may be why entering tick broadcast has failed.
+ * There is little the kernel can do to work around
+ * that, so enter WFI instead (idle state 0).
+ */
+ cpu_do_idle();
+ ret = 0;
+ goto out_arch_exit;
+ }
+ }
+
+ ret = state->enter(dev, drv, index);
+
+ if (broadcast)
+ tick_broadcast_exit();
+
+out_arch_exit:
+ arch_cpu_idle_exit();
+
+ return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+ int cpu = (long)arg;
+ int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+ struct cpuidle_device *dev;
+ struct cpuidle_driver *drv;
+ /* No need for an actual callback, we just want to wake up the CPU. */
+ struct timer_list wakeup_timer;
+
+ /* Wait for the main thread to give the start signal. */
+ wait_for_completion(&suspend_threads_started);
+
+ /* Set maximum priority to preempt all other threads on this CPU. */
+ sched_set_fifo(current);
+
+ dev = this_cpu_read(cpuidle_devices);
+ drv = cpuidle_get_cpu_driver(dev);
+
+ pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+ cpu, drv->state_count - 1);
+
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
+ for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+ int index;
+ /*
+ * Test all possible states, except 0 (which is usually WFI and
+ * doesn't use PSCI).
+ */
+ for (index = 1; index < drv->state_count; ++index) {
+ int ret;
+ struct cpuidle_state *state = &drv->states[index];
+
+ /*
+ * Set the timer to wake this CPU up in some time (which
+ * should be largely sufficient for entering suspend).
+ * If the local tick is disabled when entering suspend,
+ * suspend_cpu() takes care of switching to a broadcast
+ * tick, so the timer will still wake us up.
+ */
+ mod_timer(&wakeup_timer, jiffies +
+ usecs_to_jiffies(state->target_residency));
+
+ /* IRQs must be disabled during suspend operations. */
+ local_irq_disable();
+
+ ret = suspend_cpu(dev, drv, index);
+
+ /*
+ * We have woken up. Re-enable IRQs to handle any
+ * pending interrupt, do not wait until the end of the
+ * loop.
+ */
+ local_irq_enable();
+
+ if (ret == index) {
+ ++nb_suspend;
+ } else if (ret >= 0) {
+ /* We did not enter the expected state. */
+ ++nb_shallow_sleep;
+ } else {
+ pr_err("Failed to suspend CPU %d: error %d "
+ "(requested state %d, cycle %d)\n",
+ cpu, ret, index, i);
+ ++nb_err;
+ }
+ }
+ }
+
+ /*
+ * Disable the timer to make sure that the timer will not trigger
+ * later.
+ */
+ del_timer(&wakeup_timer);
+ destroy_timer_on_stack(&wakeup_timer);
+
+ if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+ complete(&suspend_threads_done);
+
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_park())
+ break;
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+ kthread_parkme();
+
+ return nb_err;
+}
+
+static int suspend_tests(void)
+{
+ int i, cpu, err = 0;
+ struct task_struct **threads;
+ int nb_threads = 0;
+
+ threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ /*
+ * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+ * mode, as it might interfere with the suspend threads on other CPUs.
+ * This does not prevent the suspend threads from using cpuidle (only
+ * the idle tasks check this status). Take the idle lock so that
+ * the cpuidle driver and device look-up can be carried out safely.
+ */
+ cpuidle_pause_and_lock();
+
+ for_each_online_cpu(cpu) {
+ struct task_struct *thread;
+ /* Check that cpuidle is available on that CPU. */
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ if (!dev || !drv) {
+ pr_warn("cpuidle not available on CPU %d, ignoring\n",
+ cpu);
+ continue;
+ }
+
+ thread = kthread_create_on_cpu(suspend_test_thread,
+ (void *)(long)cpu, cpu,
+ "psci_suspend_test");
+ if (IS_ERR(thread))
+ pr_err("Failed to create kthread on CPU %d\n", cpu);
+ else
+ threads[nb_threads++] = thread;
+ }
+
+ if (nb_threads < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ atomic_set(&nb_active_threads, nb_threads);
+
+ /*
+ * Wake up the suspend threads. To avoid the main thread being preempted
+ * before all the threads have been unparked, the suspend threads will
+ * wait for the completion of suspend_threads_started.
+ */
+ for (i = 0; i < nb_threads; ++i)
+ wake_up_process(threads[i]);
+ complete_all(&suspend_threads_started);
+
+ wait_for_completion(&suspend_threads_done);
+
+
+ /* Stop and destroy all threads, get return status. */
+ for (i = 0; i < nb_threads; ++i) {
+ err += kthread_park(threads[i]);
+ err += kthread_stop(threads[i]);
+ }
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
+ return err;
+}
+
+static int __init psci_checker(void)
+{
+ int ret;
+
+ /*
+	 * Since we're in an initcall, we assume that all the CPUs that can
+	 * be onlined have been onlined.
+ *
+ * The tests assume that hotplug is enabled but nobody else is using it,
+ * otherwise the results will be unpredictable. However, since there
+ * is no userspace yet in initcalls, that should be fine, as long as
+ * no torture test is running at the same time (see Kconfig).
+ */
+ nb_available_cpus = num_online_cpus();
+
+ /* Check PSCI operations are set up and working. */
+ ret = psci_ops_check();
+ if (ret)
+ return ret;
+
+ pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+ pr_info("Starting hotplug tests\n");
+ ret = hotplug_tests();
+ if (ret == 0)
+ pr_info("Hotplug tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in hotplug tests\n", ret);
+ else {
+ pr_err("Out of memory\n");
+ return ret;
+ }
+
+ pr_info("Starting suspend tests (%d cycles per state)\n",
+ NUM_SUSPEND_CYCLE);
+ ret = suspend_tests();
+ if (ret == 0)
+ pr_info("Suspend tests passed OK\n");
+ else if (ret > 0)
+ pr_err("%d error(s) encountered in suspend tests\n", ret);
+ else {
+ switch (ret) {
+ case -ENOMEM:
+ pr_err("Out of memory\n");
+ break;
+ case -ENODEV:
+ pr_warn("Could not start suspend tests on any CPU\n");
+ break;
+ }
+ }
+
+ pr_info("PSCI checker completed\n");
+ return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
new file mode 100644
index 000000000..eba6b60bf
--- /dev/null
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+
+/**
+ * struct scm_legacy_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_legacy_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ * ------------------- <--- struct scm_legacy_command
+ * | command header |
+ * ------------------- <--- scm_legacy_get_command_buffer()
+ * | command buffer |
+ * ------------------- <--- struct scm_legacy_response and
+ * | response header | scm_legacy_command_to_response()
+ * ------------------- <--- scm_legacy_get_response_buffer()
+ * | response buffer |
+ * -------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_legacy_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_legacy_command {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 resp_hdr_offset;
+ __le32 id;
+ __le32 buf[];
+};
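
To make the layout above concrete, here is a hedged sketch of the offset arithmetic for a legacy call carrying two 32-bit arguments; it mirrors what scm_legacy_call() below computes, with local names used purely for illustration:

        size_t cmd_len  = 2 * sizeof(__le32);                   /* argument payload */
        size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);   /* response payload */
        size_t total    = sizeof(struct scm_legacy_command) + cmd_len +
                          sizeof(struct scm_legacy_response) + resp_len;

        cmd->len             = cpu_to_le32(total);
        cmd->buf_offset      = cpu_to_le32(sizeof(*cmd));           /* command buffer  */
        cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); /* response header */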
+
+/**
+ * struct scm_legacy_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_legacy_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_legacy_response {
+ __le32 len;
+ __le32 buf_offset;
+ __le32 is_complete;
+};
+
+/**
+ * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_legacy_response *scm_legacy_command_to_response(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
+}
+
+/**
+ * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_legacy_get_command_buffer(
+ const struct scm_legacy_command *cmd)
+{
+ return (void *)cmd->buf;
+}
+
+/**
+ * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_legacy_get_response_buffer(
+ const struct scm_legacy_response *rsp)
+{
+ return (void *)rsp + le32_to_cpu(rsp->buf_offset);
+}
+
+static void __scm_legacy_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ do {
+ arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ smc->args[6], smc->args[7], res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+/**
+ * scm_legacy_call() - Sends a command to the SCM and waits for the command to
+ * finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Cache maintenance on the command
+ * and response buffers is taken care of by qcom_scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ u8 arglen = desc->arginfo & 0xf;
+ int ret = 0, context_id;
+ unsigned int i;
+ struct scm_legacy_command *cmd;
+ struct scm_legacy_response *rsp;
+ struct arm_smccc_args smc = {0};
+ struct arm_smccc_res smc_res;
+ const size_t cmd_len = arglen * sizeof(__le32);
+ const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
+ __le32 *arg_buf;
+ const __le32 *res_buf;
+
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+ cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
+
+ arg_buf = scm_legacy_get_command_buffer(cmd);
+ for (i = 0; i < arglen; i++)
+ arg_buf[i] = cpu_to_le32(desc->args[i]);
+
+ rsp = scm_legacy_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ smc.args[0] = 1;
+ smc.args[1] = (unsigned long)&context_id;
+ smc.args[2] = cmd_phys;
+
+ mutex_lock(&qcom_scm_lock);
+ __scm_legacy_do(&smc, &smc_res);
+ if (smc_res.a0)
+ ret = qcom_scm_remap_error(smc_res.a0);
+ mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ goto out;
+
+ do {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
+ } while (!rsp->is_complete);
+
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+
+ if (res) {
+ res_buf = scm_legacy_get_response_buffer(rsp);
+ for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
+ res->result[i] = le32_to_cpu(res_buf[i]);
+ }
+out:
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
+ return ret;
+}
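
As a concrete illustration of the cache-maintenance note above, a hypothetical caller that hands an additional buffer to the secure world would be responsible for mapping it for the device around the call; a minimal sketch (buf, len and the descriptor contents are assumptions for illustration only):

        dma_addr_t buf_phys = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, buf_phys))
                return -ENOMEM;

        desc.args[0] = buf_phys;                 /* secure world reads this buffer */
        ret = scm_legacy_call(dev, &desc, &res);

        /* hand the buffer back to the CPU before reading it again */
        dma_unmap_single(dev, buf_phys, len, DMA_TO_DEVICE);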
+
+#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
+#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
+#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
+#define SCM_LEGACY_MASK_IRQS BIT(5)
+#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
+ ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
+ SCM_LEGACY_CLASS_REGISTER | \
+ SCM_LEGACY_MASK_IRQS | \
+ (n & 0xf))
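
For example, assuming service 0x1 and command 0x1 with a single register argument, the encoding works out as follows (a worked example of the macros above, not a value taken from this driver):

        /*
         * SCM_LEGACY_FNID(0x1, 0x1)          = (0x1 << 10) | 0x1 = 0x401
         * SCM_LEGACY_ATOMIC_ID(0x1, 0x1, 1)
         *   = (0x401 << 12) | SCM_LEGACY_CLASS_REGISTER | SCM_LEGACY_MASK_IRQS | 1
         *   = 0x401000      | 0x200                     | 0x20                 | 0x1
         *   = 0x401221
         */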
+
+/**
+ * scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
+ * and 3 return values
+ * @unused: struct device, not used by the legacy atomic path
+ * @desc: SCM call descriptor containing arguments
+ * @res: SCM call return values
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+int scm_legacy_call_atomic(struct device *unused,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ int context_id;
+ struct arm_smccc_res smc_res;
+ size_t arglen = desc->arginfo & 0xf;
+
+ BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
+
+ arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
+ (unsigned long)&context_id,
+ desc->args[0], desc->args[1], desc->args[2],
+ desc->args[3], desc->args[4], 0, &smc_res);
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return smc_res.a0;
+}
diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
new file mode 100644
index 000000000..d11183336
--- /dev/null
+++ b/drivers/firmware/qcom_scm-smc.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+/**
+ * struct arm_smccc_args
+ * @args: The array of values used in registers in smc instruction
+ */
+struct arm_smccc_args {
+ unsigned long args[8];
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define SCM_SMC_N_REG_ARGS 4
+#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
+#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
+#define SCM_SMC_FIRST_REG_IDX 2
+#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
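
With MAX_QCOM_SCM_ARGS equal to 10, the constants above work out as below; when more than four arguments are passed, the last argument register is reused to carry the physical address of a spill buffer, as __scm_smc_call() does further down (arithmetic shown for illustration):

        /*
         * SCM_SMC_FIRST_EXT_IDX = 4 - 1      = 3  (first argument that spills)
         * SCM_SMC_N_EXT_ARGS    = 10 - 4 + 1 = 7  (arguments 3..9 go to memory)
         * SCM_SMC_LAST_REG_IDX  = 2 + 4 - 1  = 5  (register reused for the
         *                                          spill buffer's physical address)
         */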
+
+static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ unsigned long a0 = smc->args[0];
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
+ smc->args[3], smc->args[4], smc->args[5],
+ quirk.state.a6, smc->args[7], res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ a0 = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void __scm_smc_do(const struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __scm_smc_do_quirk(smc, res);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __scm_smc_do_quirk(smc, res);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+
+int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic)
+{
+ int arglen = desc->arginfo & 0xf;
+ int i;
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+ u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ struct arm_smccc_res smc_res;
+ struct arm_smccc_args smc = {0};
+
+ smc.args[0] = ARM_SMCCC_CALL_VAL(
+ smccc_call_type,
+ qcom_smccc_convention,
+ desc->owner,
+ SCM_SMC_FNID(desc->svc, desc->cmd));
+ smc.args[1] = desc->arginfo;
+ for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
+ smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
+
+ if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ SCM_SMC_FIRST_EXT_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ }
+
+ __scm_smc_do(&smc, &smc_res, atomic);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (res) {
+ res->result[0] = smc_res.a1;
+ res->result[1] = smc_res.a2;
+ res->result[2] = smc_res.a3;
+ }
+
+ return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
+
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
new file mode 100644
index 000000000..3e3e37e0e
--- /dev/null
+++ b/drivers/firmware/qcom_scm.c
@@ -0,0 +1,1307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
+
+#include "qcom_scm.h"
+
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
+#define SCM_HAS_CORE_CLK BIT(0)
+#define SCM_HAS_IFACE_CLK BIT(1)
+#define SCM_HAS_BUS_CLK BIT(2)
+
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct reset_controller_dev reset;
+
+ u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
+};
+
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+
+struct qcom_scm_wb_entry {
+ int flag;
+ void *entry;
+};
+
+static struct qcom_scm_wb_entry qcom_scm_wb[] = {
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+ { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static const char *qcom_scm_convention_names[] = {
+ [SMC_CONVENTION_UNKNOWN] = "unknown",
+ [SMC_CONVENTION_ARM_32] = "smc arm 32",
+ [SMC_CONVENTION_ARM_64] = "smc arm 64",
+ [SMC_CONVENTION_LEGACY] = "smc legacy",
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ return 0;
+
+disable_iface:
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ clk_disable_unprepare(__scm->core_clk);
+ clk_disable_unprepare(__scm->iface_clk);
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
+enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
+static DEFINE_SPINLOCK(scm_query_lock);
+
+static enum qcom_scm_convention __get_convention(void)
+{
+ unsigned long flags;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+ QCOM_SCM_INFO_IS_CALL_AVAIL) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ enum qcom_scm_convention probed_convention;
+ int ret;
+ bool forced = false;
+
+ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+ return qcom_scm_convention;
+
+ /*
+ * Per the "SMC calling convention specification", the 64-bit calling
+	 * convention can only be used when the client is 64-bit, otherwise
+	 * the system will encounter undefined behaviour.
+ */
+#if IS_ENABLED(CONFIG_ARM64)
+ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+ */
+ probed_convention = SMC_CONVENTION_ARM_64;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto found;
+
+ /*
+ * Some SC7180 firmwares didn't implement the
+	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
+	 * ARM_64 calling convention on these firmwares. Luckily we don't make any
+ * early calls into the firmware on these SoCs so the device pointer
+ * will be valid here to check if the compatible matches.
+ */
+ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
+ forced = true;
+ goto found;
+ }
+#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ if (!ret && res.result[0] == 1)
+ goto found;
+
+ probed_convention = SMC_CONVENTION_LEGACY;
+found:
+ spin_lock_irqsave(&scm_query_lock, flags);
+ if (probed_convention != qcom_scm_convention) {
+ qcom_scm_convention = probed_convention;
+ pr_info("qcom_scm: convention: %s%s\n",
+ qcom_scm_convention_names[qcom_scm_convention],
+ forced ? " (forced)" : "");
+ }
+ spin_unlock_irqrestore(&scm_query_lock, flags);
+
+ return qcom_scm_convention;
+}
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ might_sleep();
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, false);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res)
+{
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ return scm_smc_call(dev, desc, res, true);
+ case SMC_CONVENTION_LEGACY:
+ return scm_legacy_call_atomic(dev, desc, res);
+ default:
+ pr_err("Unknown current SCM calling convention.\n");
+ return -EINVAL;
+ }
+}
+
+static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_INFO,
+ .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ switch (__get_convention()) {
+ case SMC_CONVENTION_ARM_32:
+ case SMC_CONVENTION_ARM_64:
+ desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+ break;
+ case SMC_CONVENTION_LEGACY:
+ desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+ break;
+ default:
+ pr_err("Unknown SMC convention being used\n");
+ return false;
+ }
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? false : !!res.result[0];
+}
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int ret;
+ int flags = 0;
+ int cpu;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR,
+ .arginfo = QCOM_SCM_ARGS(2),
+ };
+
+ /*
+ * Reassign only if we are switching from hotplug entry point
+ * to cpuidle entry point or vice versa.
+ */
+ for_each_cpu(cpu, cpus) {
+ if (entry == qcom_scm_wb[cpu].entry)
+ continue;
+ flags |= qcom_scm_wb[cpu].flag;
+ }
+
+ /* No change in entry function */
+ if (!flags)
+ return 0;
+
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret) {
+ for_each_cpu(cpu, cpus)
+ qcom_scm_wb[cpu].entry = entry;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
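
A hedged usage sketch: a hypothetical ARM cpuidle or hotplug caller might register its resume entry point for all online CPUs like this (the entry symbol is an assumption, not something this file defines):

        ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpu_online_mask);
        if (ret)
                pr_err("failed to set warm boot address: %d\n", ret);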
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = 0;
+ int cpu;
+ int scm_cb_flags[] = {
+ QCOM_SCM_FLAG_COLDBOOT_CPU0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+ };
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_ADDR,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ if (!cpus || (cpus && cpumask_empty(cpus)))
+ return -EINVAL;
+
+ for_each_cpu(cpu, cpus) {
+ if (cpu < ARRAY_SIZE(scm_cb_flags))
+ flags |= scm_cb_flags[cpu];
+ else
+ set_cpu_present(cpu, false);
+ }
+
+ desc.args[0] = flags;
+ desc.args[1] = virt_to_phys(entry);
+
+ return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is the endpoint for powering down the cpu. If there is a pending
+ * interrupt, control returns from this function; otherwise the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void qcom_scm_cpu_power_down(u32 flags)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+ .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_cpu_power_down);
+
+int qcom_scm_set_remote_state(u32 state, u32 id)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = state,
+ .args[1] = id,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
+
+static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_BOOT,
+ .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_BOOT_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = qcom_scm_io_writel(__scm->dload_mode_addr,
+ enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ /*
+	 * During the scm call, memory protection will be enabled for the
+	 * metadata blob, so make sure it is physically contiguous, 4K aligned
+	 * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+ GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto free_metadata;
+
+ desc.args[1] = mdata_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ qcom_scm_clk_disable();
+
+free_metadata:
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+ .arginfo = QCOM_SCM_ARGS(3),
+ .args[0] = peripheral,
+ .args[1] = addr,
+ .args[2] = size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ qcom_scm_clk_disable();
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = peripheral,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PIL_PAS_IS_SUPPORTED))
+ return false;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_PIL,
+ .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = reset,
+ .args[1] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+ .assert = qcom_scm_pas_reset_assert,
+ .deassert = qcom_scm_pas_reset_deassert,
+};
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_READ,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = addr,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+
+ ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
+ if (ret >= 0)
+ *val = res.result[0];
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_IO,
+ .cmd = QCOM_SCM_IO_WRITE,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = addr,
+ .args[1] = val,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+/**
+ * qcom_scm_restore_sec_cfg_available() - Check if secure environment
+ * supports restore security config interface.
+ *
+ * Return true if restore-cfg interface is supported, false if not.
+ */
+bool qcom_scm_restore_sec_cfg_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_RESTORE_SEC_CFG);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = device_id,
+ .args[1] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (size)
+ *size = res.result[0];
+
+ return ret ? : res.result[1];
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+ .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_VAL),
+ .args[0] = addr,
+ .args[1] = size,
+ .args[2] = spare,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+	/* the page table has already been initialized, ignore the error */
+ if (ret == -EPERM)
+ ret = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start,
+ u32 cp_nonpixel_size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_VIDEO_VAR,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = cp_start,
+ .args[1] = cp_size,
+ .args[2] = cp_nonpixel_start,
+ .args[3] = cp_nonpixel_size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
+
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_ASSIGN,
+ .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = mem_region,
+ .args[1] = mem_sz,
+ .args[2] = src,
+ .args[3] = src_sz,
+ .args[4] = dest,
+ .args[5] = dest_sz,
+ .args[6] = 0,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid bitmap for the current set of owners, each set bit
+ * indicating a unique owner
+ * @newvm: array having new owners and corresponding permission
+ * flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure or 0 on success with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *srcvm,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ dma_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret, i, b;
+ unsigned long srcvm_bits = *srcvm;
+
+ src_sz = hweight_long(srcvm_bits) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ i = 0;
+ for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
+ src[i++] = cpu_to_le32(b);
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map->mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill details of next vmid detail */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+ destvm->vmid = cpu_to_le32(newvm->vmid);
+ destvm->perm = cpu_to_le32(newvm->perm);
+ destvm->ctx = 0;
+ destvm->ctx_size = 0;
+ next_vm |= BIT(newvm->vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
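
A hedged usage sketch of the ownership handover above: a hypothetical remoteproc-style caller reassigning a region from HLOS to the modem VM (mem_phys, mem_size and dev are assumptions; the VMID and permission constants are the ones declared in <linux/qcom_scm.h>):

        struct qcom_scm_vmperm newvm = {
                .vmid = QCOM_SCM_VMID_MSS_MSA,
                .perm = QCOM_SCM_PERM_RW,
        };
        unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
        int ret;

        ret = qcom_scm_assign_mem(mem_phys, mem_size, &srcvm, &newvm, 1);
        if (ret)
                dev_err(dev, "memory assignment failed: %d\n", ret);
        /* on success, srcvm now holds BIT(QCOM_SCM_VMID_MSS_MSA) */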
+
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+ QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ * @mode: access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .args[3] = mode,
+ .arginfo = QCOM_SCM_ARGS(4),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id: tz initiator id
+ * @offset: OCMEM offset
+ * @size: OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_OCMEM,
+ .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+ .args[0] = id,
+ .args[1] = offset,
+ .args[2] = size,
+ .arginfo = QCOM_SCM_ARGS(3),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+/**
+ * qcom_scm_ice_available() - Is the ICE key programming interface available?
+ *
+ * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
+ * qcom_scm_ice_set_key() are available.
+ */
+bool qcom_scm_ice_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
+}
+EXPORT_SYMBOL(qcom_scm_ice_available);
+
+/**
+ * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
+ * @index: the keyslot to invalidate
+ *
+ * The UFSHCI standard defines a standard way to do this, but it doesn't work on
+ * these SoCs; only this SCM call does.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_invalidate_key(u32 index)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = index,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
+
+/**
+ * qcom_scm_ice_set_key() - Set an inline encryption key
+ * @index: the keyslot into which to set the key
+ * @key: the key to program
+ * @key_size: the size of the key in bytes
+ * @cipher: the encryption algorithm the key is for
+ * @data_unit_size: the encryption data unit size, i.e. the size of each
+ * individual plaintext and ciphertext. Given in 512-byte
+ * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
+ *
+ * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
+ * can then be used to encrypt/decrypt UFS I/O requests inline.
+ *
+ * The UFSHCI standard defines a standard way to do this, but it doesn't work on
+ * these SoCs; only this SCM call does.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
+ QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL),
+ .args[0] = index,
+ .args[2] = key_size,
+ .args[3] = cipher,
+ .args[4] = data_unit_size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ void *keybuf;
+ dma_addr_t key_phys;
+ int ret;
+
+ /*
+ * 'key' may point to vmalloc()'ed memory, but we need to pass a
+ * physical address that's been properly flushed. The sanctioned way to
+ * do this is by using the DMA API. But as is best practice for crypto
+ * keys, we also must wipe the key after use. This makes kmemdup() +
+ * dma_map_single() not clearly correct, since the DMA API can use
+ * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
+ * keys is normally rare and thus not performance-critical.
+ */
+
+ keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
+ GFP_KERNEL);
+ if (!keybuf)
+ return -ENOMEM;
+ memcpy(keybuf, key, key_size);
+ desc.args[1] = key_phys;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+ memzero_explicit(keybuf, key_size);
+
+ dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_ice_set_key);
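
A hedged usage sketch: a hypothetical UFS crypto caller programming an AES-256-XTS key into keyslot 0 for 4096-byte data units (the 64-byte raw_key buffer is an assumption; the cipher enum value is the one declared in <linux/qcom_scm.h>):

        /* data_unit_size is in 512-byte units: 8 * 512 = 4096 bytes */
        err = qcom_scm_ice_set_key(0, raw_key, 64,
                                   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
        if (err)
                pr_err("failed to program ICE key: %d\n", err);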
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+ bool avail;
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_HDCP_INVOKE);
+
+ qcom_scm_clk_disable();
+
+ return avail;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_HDCP,
+ .cmd = QCOM_SCM_HDCP_INVOKE,
+ .arginfo = QCOM_SCM_ARGS(10),
+ .args = {
+ req[0].addr,
+ req[0].val,
+ req[1].addr,
+ req[1].val,
+ req[2].addr,
+ req[2].val,
+ req[3].addr,
+ req[3].val,
+ req[4].addr,
+ req[4].val
+ },
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ *resp = res.result[0];
+
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+ .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+ .arginfo = QCOM_SCM_ARGS(2),
+ .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+ .args[1] = en,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+
+ return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ unsigned long clks;
+ int ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
+ clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ return PTR_ERR(scm->core_clk);
+
+ if (clks & SCM_HAS_CORE_CLK) {
+ dev_err(&pdev->dev, "failed to acquire core clk\n");
+ return PTR_ERR(scm->core_clk);
+ }
+
+ scm->core_clk = NULL;
+ }
+
+ scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk)) {
+ if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
+ return PTR_ERR(scm->iface_clk);
+
+ if (clks & SCM_HAS_IFACE_CLK) {
+ dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ return PTR_ERR(scm->iface_clk);
+ }
+
+ scm->iface_clk = NULL;
+ }
+
+ scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk)) {
+ if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
+ return PTR_ERR(scm->bus_clk);
+
+ if (clks & SCM_HAS_BUS_CLK) {
+ dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ return PTR_ERR(scm->bus_clk);
+ }
+
+ scm->bus_clk = NULL;
+ }
+
+ scm->reset.ops = &qcom_scm_pas_reset_ops;
+ scm->reset.nr_resets = 1;
+ scm->reset.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
+
+ /* vote for max clk rate for highest performance */
+ ret = clk_set_rate(scm->core_clk, INT_MAX);
+ if (ret)
+ return ret;
+
+ __scm = scm;
+ __scm->dev = &pdev->dev;
+
+ __get_convention();
+
+ /*
+	 * If requested, enable "download mode"; from this point on a warm boot
+	 * will cause the boot stages to enter download mode, unless
+ * disabled below by a clean shutdown/reboot.
+ */
+ if (download_mode)
+ qcom_scm_set_download_mode(true);
+
+ return 0;
+}
+
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+ /* Clean shutdown, disable download mode to allow normal restart */
+ qcom_scm_set_download_mode(false);
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm-apq8064",
+ /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
+ },
+ { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
+ { .compatible = "qcom,scm-ipq4019" },
+ { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
+ { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
+ { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
+ { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
+ { .compatible = "qcom,scm-msm8994" },
+ { .compatible = "qcom,scm-msm8996" },
+ { .compatible = "qcom,scm" },
+ {}
+};
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ },
+ .probe = qcom_scm_probe,
+ .shutdown = qcom_scm_shutdown,
+};
+
+static int __init qcom_scm_init(void)
+{
+ return platform_driver_register(&qcom_scm_driver);
+}
+subsys_initcall(qcom_scm_init);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
new file mode 100644
index 000000000..632fe3142
--- /dev/null
+++ b/drivers/firmware/qcom_scm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef __QCOM_SCM_INT_H
+#define __QCOM_SCM_INT_H
+
+enum qcom_scm_convention {
+ SMC_CONVENTION_UNKNOWN,
+ SMC_CONVENTION_LEGACY,
+ SMC_CONVENTION_ARM_32,
+ SMC_CONVENTION_ARM_64,
+};
+
+extern enum qcom_scm_convention qcom_scm_convention;
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0x3) << 4) | \
+ (((b) & 0x3) << 6) | \
+ (((c) & 0x3) << 8) | \
+ (((d) & 0x3) << 10) | \
+ (((e) & 0x3) << 12) | \
+ (((f) & 0x3) << 14) | \
+ (((g) & 0x3) << 16) | \
+ (((h) & 0x3) << 18) | \
+ (((i) & 0x3) << 20) | \
+ (((j) & 0x3) << 22) | \
+ ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
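
As a worked example of the packing above, the arginfo used by qcom_scm_pas_init_image() in qcom_scm.c, QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), expands to:

        /*
         * ((QCOM_SCM_VAL & 0x3) << 4) | ((QCOM_SCM_RW & 0x3) << 6) | (2 & 0xf)
         *   = (0 << 4)                | (2 << 6)                   | 0x2
         *   = 0x82
         * i.e. two arguments: a plain value followed by a read-write buffer.
         */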
+
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+ u32 svc;
+ u32 cmd;
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u32 owner;
+};
+
+/**
+ * struct qcom_scm_res
+ * @result: The values returned by the secure syscall
+ */
+struct qcom_scm_res {
+ u64 result[MAX_QCOM_SCM_RETS];
+};
+
+#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ enum qcom_scm_convention qcom_convention,
+ struct qcom_scm_res *res, bool atomic);
+#define scm_smc_call(dev, desc, res, atomic) \
+ __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
+
+#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+ const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+ struct qcom_scm_res *res);
+
+#define QCOM_SCM_SVC_BOOT 0x01
+#define QCOM_SCM_BOOT_SET_ADDR 0x01
+#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
+#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
+#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
+#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
+
+#define QCOM_SCM_SVC_PIL 0x02
+#define QCOM_SCM_PIL_PAS_INIT_IMAGE 0x01
+#define QCOM_SCM_PIL_PAS_MEM_SETUP 0x02
+#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET 0x05
+#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06
+#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07
+#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a
+
+#define QCOM_SCM_SVC_IO 0x05
+#define QCOM_SCM_IO_READ 0x01
+#define QCOM_SCM_IO_WRITE 0x02
+
+#define QCOM_SCM_SVC_INFO 0x06
+#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
+
+#define QCOM_SCM_SVC_MP 0x0c
+#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
+#define QCOM_SCM_MP_VIDEO_VAR 0x08
+#define QCOM_SCM_MP_ASSIGN 0x16
+
+#define QCOM_SCM_SVC_OCMEM 0x0f
+#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
+#define QCOM_SCM_OCMEM_UNLOCK_CMD 0x02
+
+#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
+#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
+#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+
+#define QCOM_SCM_SVC_HDCP 0x11
+#define QCOM_SCM_HDCP_INVOKE 0x01
+
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+
+extern void __qcom_scm_init(void);
+
+/* common error codes */
+#define QCOM_SCM_V2_EBUSY -12
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_V2_EBUSY:
+ return -EBUSY;
+ }
+ return -EINVAL;
+}
+
+#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644
index 000000000..f08e056ed
--- /dev/null
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -0,0 +1,938 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via an ACPI node (on x86 and
+ * select subsets of aarch64), a Device Tree node (on arm), or a kernel
+ * module (or command line) parameter with the following syntax:
+ *
+ * [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ * or
+ * [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
+ *
+ * where:
+ * <size> := size of ioport or mmio range
+ * <base> := physical base address of ioport or mmio range
+ * <ctrl_off> := (optional) offset of control register
+ * <data_off> := (optional) offset of data register
+ * <dma_off> := (optional) offset of dma register
+ *
+ * e.g.:
+ * qemu_fw_cfg.ioport=12@0x510:0:1:4 (the default on x86)
+ * or
+ * qemu_fw_cfg.mmio=16@0x9020000:8:0:16 (the default on arm)
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <uapi/linux/qemu_fw_cfg.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/crash_core.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+static void __iomem *fw_cfg_reg_dma;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static void fw_cfg_sel_endianness(u16 key)
+{
+ if (fw_cfg_is_mmio)
+ iowrite16be(key, fw_cfg_reg_ctrl);
+ else
+ iowrite16(key, fw_cfg_reg_ctrl);
+}
+
+#ifdef CONFIG_CRASH_CORE
+static inline bool fw_cfg_dma_enabled(void)
+{
+ return (fw_cfg_rev & FW_CFG_VERSION_DMA) && fw_cfg_reg_dma;
+}
+
+/* qemu fw_cfg device is sync today, but spec says it may become async */
+static void fw_cfg_wait_for_control(struct fw_cfg_dma_access *d)
+{
+ for (;;) {
+ u32 ctrl = be32_to_cpu(READ_ONCE(d->control));
+
+ /* do not reorder the read to d->control */
+ rmb();
+ if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0)
+ return;
+
+ cpu_relax();
+ }
+}
+
+static ssize_t fw_cfg_dma_transfer(void *address, u32 length, u32 control)
+{
+ phys_addr_t dma;
+ struct fw_cfg_dma_access *d = NULL;
+ ssize_t ret = length;
+
+ d = kmalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ /* fw_cfg device does not need IOMMU protection, so use physical addresses */
+ *d = (struct fw_cfg_dma_access) {
+ .address = cpu_to_be64(address ? virt_to_phys(address) : 0),
+ .length = cpu_to_be32(length),
+ .control = cpu_to_be32(control)
+ };
+
+ dma = virt_to_phys(d);
+
+ iowrite32be((u64)dma >> 32, fw_cfg_reg_dma);
+ /* force memory to sync before notifying device via MMIO */
+ wmb();
+ iowrite32be(dma, fw_cfg_reg_dma + 4);
+
+ fw_cfg_wait_for_control(d);
+
+ if (be32_to_cpu(READ_ONCE(d->control)) & FW_CFG_DMA_CTL_ERROR) {
+ ret = -EIO;
+ }
+
+end:
+ kfree(d);
+
+ return ret;
+}
+#endif
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_read_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk = -1U;
+ acpi_status status;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+ memset(buf, 0, count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ fw_cfg_sel_endianness(key);
+ while (pos-- > 0)
+ ioread8(fw_cfg_reg_data);
+ ioread8_rep(fw_cfg_reg_data, buf, count);
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+ return count;
+}
+
+#ifdef CONFIG_CRASH_CORE
+/* write chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static ssize_t fw_cfg_write_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk = -1U;
+ acpi_status status;
+ ssize_t ret = count;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "%s: Failed to lock ACPI!\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ if (pos == 0) {
+ ret = fw_cfg_dma_transfer(buf, count, key << 16
+ | FW_CFG_DMA_CTL_SELECT
+ | FW_CFG_DMA_CTL_WRITE);
+ } else {
+ fw_cfg_sel_endianness(key);
+ ret = fw_cfg_dma_transfer(NULL, pos, FW_CFG_DMA_CTL_SKIP);
+ if (ret < 0)
+ goto end;
+ ret = fw_cfg_dma_transfer(buf, count, FW_CFG_DMA_CTL_WRITE);
+ }
+
+end:
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+
+ return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+ if (fw_cfg_is_mmio) {
+ iounmap(fw_cfg_dev_base);
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ } else {
+ ioport_unmap(fw_cfg_dev_base);
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ }
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# define FW_CFG_CTRL_OFF 0x08
+# define FW_CFG_DATA_OFF 0x00
+# define FW_CFG_DMA_OFF 0x10
+# elif defined(CONFIG_PARISC) /* parisc */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x04
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# define FW_CFG_DMA_OFF 0x04
+# else
+# error "QEMU FW_CFG not available on this architecture!"
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+ char sig[FW_CFG_SIG_SIZE];
+ struct resource *range, *ctrl, *data, *dma;
+
+ /* acquire i/o range details */
+ fw_cfg_is_mmio = false;
+ range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!range) {
+ fw_cfg_is_mmio = true;
+ range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!range)
+ return -EINVAL;
+ }
+ fw_cfg_p_base = range->start;
+ fw_cfg_p_size = resource_size(range);
+
+ if (fw_cfg_is_mmio) {
+ if (!request_mem_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_mem"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ } else {
+ if (!request_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_io"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ }
+
+ /* were custom register offsets provided (e.g. on the command line)? */
+ ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+ data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+ dma = platform_get_resource_byname(pdev, IORESOURCE_REG, "dma");
+ if (ctrl && data) {
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+ fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+ } else {
+ /* use architecture-specific offsets */
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+ fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+ }
+
+ if (dma)
+ fw_cfg_reg_dma = fw_cfg_dev_base + dma->start;
+#ifdef FW_CFG_DMA_OFF
+ else
+ fw_cfg_reg_dma = fw_cfg_dev_base + FW_CFG_DMA_OFF;
+#endif
+
+ /* verify fw_cfg device signature */
+ if (fw_cfg_read_blob(FW_CFG_SIGNATURE, sig,
+ 0, FW_CFG_SIG_SIZE) < 0 ||
+ memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+ fw_cfg_io_cleanup();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct kobj_attribute fw_cfg_rev_attr = {
+ .attr = { .name = "rev", .mode = S_IRUSR },
+ .show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+ struct kobject kobj;
+ u32 size;
+ u16 select;
+ char name[FW_CFG_MAX_FILE_PATH];
+ struct list_head list;
+};
+
+#ifdef CONFIG_CRASH_CORE
+static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
+{
+ static struct fw_cfg_vmcoreinfo *data;
+ ssize_t ret;
+
+ data = kmalloc(sizeof(struct fw_cfg_vmcoreinfo), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ *data = (struct fw_cfg_vmcoreinfo) {
+ .guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF),
+ .size = cpu_to_le32(VMCOREINFO_NOTE_SIZE),
+ .paddr = cpu_to_le64(paddr_vmcoreinfo_note())
+ };
+	/* spare ourselves reading host format support for now, since we
+	 * don't know what else to format - the host may ignore ours
+ */
+ ret = fw_cfg_write_blob(be16_to_cpu(f->select), data,
+ 0, sizeof(struct fw_cfg_vmcoreinfo));
+
+ kfree(data);
+ return ret;
+}
+#endif /* CONFIG_CRASH_CORE */
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_add_tail(&entry->list, &fw_cfg_entry_cache);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_del(&entry->list);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+ struct fw_cfg_sysfs_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+ fw_cfg_sysfs_cache_delist(entry);
+ kobject_put(&entry->kobj);
+ }
+}
+
+/* default_attrs: per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+ .attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+ .show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%s\n", e->name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+ &fw_cfg_sysfs_attr_size.attr,
+ &fw_cfg_sysfs_attr_key.attr,
+ &fw_cfg_sysfs_attr_name.attr,
+ NULL,
+};
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+ char *buf)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+ struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+ .show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+ .default_attrs = fw_cfg_sysfs_entry_attrs,
+ .sysfs_ops = &fw_cfg_sysfs_attr_ops,
+ .release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ if (pos > entry->size)
+ return -EINVAL;
+
+ if (count > entry->size - pos)
+ count = entry->size - pos;
+
+ return fw_cfg_read_blob(entry->select, buf, pos, count);
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+ .attr = { .name = "raw", .mode = S_IRUSR },
+ .read = fw_cfg_sysfs_read_raw,
+};
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; at the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+ struct kobject *target, const char *name)
+{
+ int ret;
+ struct kset *subdir;
+ struct kobject *ko;
+ char *name_copy, *p, *tok;
+
+ if (!dir || !target || !name || !*name)
+ return -EINVAL;
+
+ /* clone a copy of name for parsing */
+ name_copy = p = kstrdup(name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ /* create folders for each dirname token, then symlink for basename */
+ while ((tok = strsep(&p, "/")) && *tok) {
+
+ /* last (basename) token? If so, add symlink here */
+ if (!p || !*p) {
+ ret = sysfs_create_link(&dir->kobj, target, tok);
+ break;
+ }
+
+ /* does the current dir contain an item named after tok ? */
+ ko = kset_find_obj(dir, tok);
+ if (ko) {
+ /* drop reference added by kset_find_obj */
+ kobject_put(ko);
+
+ /* ko MUST be a kset - we're about to use it as one ! */
+ if (ko->ktype != dir->kobj.ktype) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* descend into already existing subdirectory */
+ dir = to_kset(ko);
+ } else {
+ /* create new subdirectory kset */
+ subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+ if (!subdir) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdir->kobj.kset = dir;
+ subdir->kobj.ktype = dir->kobj.ktype;
+ ret = kobject_set_name(&subdir->kobj, "%s", tok);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+ ret = kset_register(subdir);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+
+ /* descend into newly created subdirectory */
+ dir = subdir;
+ }
+ }
+
+ /* we're done with cloned copy of name */
+ kfree(name_copy);
+ return ret;
+}
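+
+/*
+ * Illustrative example (hypothetical file name): registering a fw_cfg file
+ * named "etc/e820" would create a kset directory "by_name/etc" (if it does
+ * not already exist) and then add a symlink "by_name/etc/e820" pointing at
+ * the corresponding by_key entry kobject.
+ */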
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+ struct kobject *k, *next;
+
+ list_for_each_entry_safe(k, next, &kset->list, entry)
+ /* all set members are ksets too, but check just in case... */
+ if (k->ktype == kset->kobj.ktype)
+ fw_cfg_kset_unregister_recursive(to_kset(k));
+
+ /* symlinks are cleanly and automatically removed with the directory */
+ kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+ int err;
+ struct fw_cfg_sysfs_entry *entry;
+
+#ifdef CONFIG_CRASH_CORE
+ if (fw_cfg_dma_enabled() &&
+ strcmp(f->name, FW_CFG_VMCOREINFO_FILENAME) == 0 &&
+ !is_kdump_kernel()) {
+ if (fw_cfg_write_vmcoreinfo(f) < 0)
+			pr_warn("fw_cfg: failed to write vmcoreinfo\n");
+ }
+#endif
+
+ /* allocate new entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* set file entry information */
+ entry->size = be32_to_cpu(f->size);
+ entry->select = be16_to_cpu(f->select);
+ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->select);
+ if (err)
+ goto err_put_entry;
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+ if (err)
+ goto err_del_entry;
+
+ /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
+
+ /* success, add entry to global cache */
+ fw_cfg_sysfs_cache_enlist(entry);
+ return 0;
+
+err_del_entry:
+ kobject_del(&entry->kobj);
+err_put_entry:
+ kobject_put(&entry->kobj);
+ return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+ int ret = 0;
+ __be32 files_count;
+ u32 count, i;
+ struct fw_cfg_file *dir;
+ size_t dir_size;
+
+ ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, &files_count,
+ 0, sizeof(files_count));
+ if (ret < 0)
+ return ret;
+
+ count = be32_to_cpu(files_count);
+ dir_size = count * sizeof(struct fw_cfg_file);
+
+ dir = kmalloc(dir_size, GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, dir,
+ sizeof(files_count), dir_size);
+ if (ret < 0)
+ goto end;
+
+ for (i = 0; i < count; i++) {
+ ret = fw_cfg_register_file(&dir[i]);
+ if (ret)
+ break;
+ }
+
+end:
+ kfree(dir);
+ return ret;
+}
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+ int err;
+ __le32 rev;
+
+ /* NOTE: If we supported multiple fw_cfg devices, we'd first create
+ * a subdirectory named after e.g. pdev->id, then hang per-device
+ * by_key (and by_name) subdirectories underneath it. However, only
+	 * one fw_cfg device exists system-wide, so if one was already found
+ * earlier, we might as well stop here.
+ */
+ if (fw_cfg_sel_ko)
+ return -EBUSY;
+
+ /* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+ err = -ENOMEM;
+ fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+ if (!fw_cfg_sel_ko)
+ goto err_sel;
+ fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+ if (!fw_cfg_fname_kset)
+ goto err_name;
+
+ /* initialize fw_cfg device i/o from platform data */
+ err = fw_cfg_do_platform_probe(pdev);
+ if (err)
+ goto err_probe;
+
+ /* get revision number, add matching top-level attribute */
+ err = fw_cfg_read_blob(FW_CFG_ID, &rev, 0, sizeof(rev));
+ if (err < 0)
+ goto err_probe;
+
+ fw_cfg_rev = le32_to_cpu(rev);
+ err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ if (err)
+ goto err_rev;
+
+ /* process fw_cfg file directory entry, registering each file */
+ err = fw_cfg_register_dir_entries();
+ if (err)
+ goto err_dir;
+
+ /* success */
+ pr_debug("fw_cfg: loaded.\n");
+ return 0;
+
+err_dir:
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+ fw_cfg_io_cleanup();
+err_probe:
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+ return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+ pr_debug("fw_cfg: unloading.\n");
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ fw_cfg_io_cleanup();
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+ return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+ { .compatible = "qemu,fw-cfg-mmio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+ { FW_CFG_ACPI_DEVICE_ID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+ .probe = fw_cfg_sysfs_probe,
+ .remove = fw_cfg_sysfs_remove,
+ .driver = {
+ .name = "fw_cfg",
+ .of_match_table = fw_cfg_sysfs_mmio_match,
+ .acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+ },
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i" \
+ ":%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i%n"
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+ "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u" \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+#define PH_ADDR_PR_4_FMT PH_ADDR_PR_3_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+ struct resource res[4] = {};
+ char *str;
+ phys_addr_t base;
+ resource_size_t size, ctrl_off, data_off, dma_off;
+ int processed, consumed = 0;
+
+ /* only one fw_cfg device can exist system-wide, so if one
+ * was processed on the command line already, we might as
+ * well stop here.
+ */
+ if (fw_cfg_cmdline_dev) {
+ /* avoid leaking previously registered device */
+ platform_device_unregister(fw_cfg_cmdline_dev);
+ return -EINVAL;
+ }
+
+ /* consume "<size>" portion of command line argument */
+ size = memparse(arg, &str);
+
+ /* get "@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]" chunks */
+ processed = sscanf(str, PH_ADDR_SCAN_FMT,
+ &base, &consumed,
+ &ctrl_off, &data_off, &consumed,
+ &dma_off, &consumed);
+
+ /* sscanf() must process precisely 1, 3 or 4 chunks:
+ * <base> is mandatory, optionally followed by <ctrl_off>
+ * and <data_off>, and <dma_off>;
+ * there must be no extra characters after the last chunk,
+ * so str[consumed] must be '\0'.
+ */
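+	/*
+	 * For example, the x86 default from the header comment,
+	 * "qemu_fw_cfg.ioport=12@0x510:0:1:4", parses to size=12, base=0x510,
+	 * ctrl_off=0, data_off=1, dma_off=4, i.e. processed == 4.
+	 */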
+ if (str[consumed] ||
+ (processed != 1 && processed != 3 && processed != 4))
+ return -EINVAL;
+
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+ IORESOURCE_IO;
+
+ /* insert register offsets, if provided */
+ if (processed > 1) {
+ res[1].name = "ctrl";
+ res[1].start = ctrl_off;
+ res[1].flags = IORESOURCE_REG;
+ res[2].name = "data";
+ res[2].start = data_off;
+ res[2].flags = IORESOURCE_REG;
+ }
+ if (processed > 3) {
+ res[3].name = "dma";
+ res[3].start = dma_off;
+ res[3].flags = IORESOURCE_REG;
+ }
+
+ /* "processed" happens to nicely match the number of resources
+ * we need to pass in to this platform device.
+ */
+ fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+ PLATFORM_DEVID_NONE, res, processed);
+
+ return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
+}
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+ /* stay silent if device was not configured via the command
+ * line, or if the parameter name (ioport/mmio) doesn't match
+ * the device setting
+ */
+ if (!fw_cfg_cmdline_dev ||
+ (!strcmp(kp->name, "mmio") ^
+ (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+ return 0;
+
+ switch (fw_cfg_cmdline_dev->num_resources) {
+ case 1:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start);
+ case 3:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start);
+ case 4:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_4_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start,
+ fw_cfg_cmdline_dev->resource[3].start);
+ }
+
+ /* Should never get here */
+ WARN(1, "Unexpected number of resources: %d\n",
+ fw_cfg_cmdline_dev->num_resources);
+ return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+ .set = fw_cfg_cmdline_set,
+ .get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+ int ret;
+
+ /* create /sys/firmware/qemu_fw_cfg/ top level directory */
+ fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+ if (!fw_cfg_top_ko)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&fw_cfg_sysfs_driver);
+ if (ret)
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+ return ret;
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+ platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+ platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+ /* clean up /sys/firmware/qemu_fw_cfg/ */
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
new file mode 100644
index 000000000..45ff03da2
--- /dev/null
+++ b/drivers/firmware/raspberrypi.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Defines interfaces for interacting with the Raspberry Pi firmware's
+ * property channel.
+ *
+ * Copyright © 2015 Broadcom
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
+#define MBOX_CHAN(msg) ((msg) & 0xf)
+#define MBOX_DATA28(msg) ((msg) & ~0xf)
+#define MBOX_CHAN_PROPERTY 8
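+
+/*
+ * Illustrative note: the property channel packs a 16-byte-aligned buffer
+ * address and a 4-bit channel number into one mailbox word, e.g.
+ * MBOX_MSG(MBOX_CHAN_PROPERTY, bus_addr) places the channel (8) in the low
+ * nibble and keeps the upper 28 bits of the buffer address.
+ */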
+
+static struct platform_device *rpi_hwmon;
+static struct platform_device *rpi_clk;
+
+struct rpi_firmware {
+ struct mbox_client cl;
+ struct mbox_chan *chan; /* The property channel. */
+ struct completion c;
+ u32 enabled;
+
+ struct kref consumers;
+};
+
+static DEFINE_MUTEX(transaction_lock);
+
+static void response_callback(struct mbox_client *cl, void *msg)
+{
+ struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
+ complete(&fw->c);
+}
+
+/*
+ * Sends a request to the firmware through the BCM2835 mailbox driver,
+ * and synchronously waits for the reply.
+ */
+static int
+rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
+{
+ u32 message = MBOX_MSG(chan, data);
+ int ret;
+
+ WARN_ON(data & 0xf);
+
+ mutex_lock(&transaction_lock);
+ reinit_completion(&fw->c);
+ ret = mbox_send_message(fw->chan, &message);
+ if (ret >= 0) {
+ if (wait_for_completion_timeout(&fw->c, HZ)) {
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ WARN_ONCE(1, "Firmware transaction timeout");
+ }
+ } else {
+ dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
+ }
+ mutex_unlock(&transaction_lock);
+
+ return ret;
+}
+
+/**
+ * rpi_firmware_property_list - Submit firmware property list
+ * @fw: Pointer to firmware structure from rpi_firmware_get().
+ * @data: Buffer holding tags.
+ * @tag_size: Size of tags buffer.
+ *
+ * Submits a set of concatenated tags to the VPU firmware through the
+ * mailbox property interface.
+ *
+ * The buffer header and the ending tag are added by this function and
+ * don't need to be supplied, just the actual tags for your operation.
+ * See struct rpi_firmware_property_tag_header for the per-tag
+ * structure.
+ */
+int rpi_firmware_property_list(struct rpi_firmware *fw,
+ void *data, size_t tag_size)
+{
+ size_t size = tag_size + 12;
+ u32 *buf;
+ dma_addr_t bus_addr;
+ int ret;
+
+ /* Packets are processed a dword at a time. */
+ if (size & 3)
+ return -EINVAL;
+
+ buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
+ GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ /* The firmware will error out without parsing in this case. */
+ WARN_ON(size >= 1024 * 1024);
+
+ buf[0] = size;
+ buf[1] = RPI_FIRMWARE_STATUS_REQUEST;
+ memcpy(&buf[2], data, tag_size);
+ buf[size / 4 - 1] = RPI_FIRMWARE_PROPERTY_END;
+ wmb();
+
+ ret = rpi_firmware_transaction(fw, MBOX_CHAN_PROPERTY, bus_addr);
+
+ rmb();
+ memcpy(data, &buf[2], tag_size);
+ if (ret == 0 && buf[1] != RPI_FIRMWARE_STATUS_SUCCESS) {
+ /*
+ * The tag name here might not be the one causing the
+ * error, if there were multiple tags in the request.
+ * But single-tag is the most common, so go with it.
+ */
+ dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
+ buf[2], buf[1]);
+ ret = -EINVAL;
+ }
+
+ dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property_list);
+
+/**
+ * rpi_firmware_property - Submit single firmware property
+ * @fw: Pointer to firmware structure from rpi_firmware_get().
+ * @tag: One of enum_mbox_property_tag.
+ * @tag_data: Tag data buffer.
+ * @buf_size: Buffer size.
+ *
+ * Submits a single tag to the VPU firmware through the mailbox
+ * property interface.
+ *
+ * This is a convenience wrapper around
+ * rpi_firmware_property_list() to avoid some of the
+ * boilerplate in property calls.
+ */
+int rpi_firmware_property(struct rpi_firmware *fw,
+ u32 tag, void *tag_data, size_t buf_size)
+{
+ struct rpi_firmware_property_tag_header *header;
+ int ret;
+
+ /* Some mailboxes can use over 1k bytes. Rather than checking
+ * size and using stack or kmalloc depending on requirements,
+ * just use kmalloc. Mailboxes don't get called enough to worry
+ * too much about the time taken in the allocation.
+ */
+ void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ header = data;
+ header->tag = tag;
+ header->buf_size = buf_size;
+ header->req_resp_size = 0;
+ memcpy(data + sizeof(*header), tag_data, buf_size);
+
+ ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header));
+
+ memcpy(tag_data, data + sizeof(*header), buf_size);
+
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_property);
+
+static void
+rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
+{
+ time64_t date_and_time;
+ u32 packet;
+ int ret = rpi_firmware_property(fw,
+ RPI_FIRMWARE_GET_FIRMWARE_REVISION,
+ &packet, sizeof(packet));
+
+ if (ret)
+ return;
+
+ /* This is not compatible with y2038 */
+ date_and_time = packet;
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
+}
+
+static void
+rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw)
+{
+ u32 packet;
+ int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_THROTTLED,
+ &packet, sizeof(packet));
+
+ if (ret)
+ return;
+
+ rpi_hwmon = platform_device_register_data(dev, "raspberrypi-hwmon",
+ -1, NULL, 0);
+}
+
+static void rpi_register_clk_driver(struct device *dev)
+{
+ struct device_node *firmware;
+
+ /*
+ * Earlier DTs don't have a node for the firmware clocks but
+ * rely on us creating a platform device by hand. If we do
+ * have a node for the firmware clocks, just bail out here.
+ */
+ firmware = of_get_compatible_child(dev->of_node,
+ "raspberrypi,firmware-clocks");
+ if (firmware) {
+ of_node_put(firmware);
+ return;
+ }
+
+ rpi_clk = platform_device_register_data(dev, "raspberrypi-clk",
+ -1, NULL, 0);
+}
+
+static void rpi_firmware_delete(struct kref *kref)
+{
+ struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
+ consumers);
+
+ mbox_free_channel(fw->chan);
+ kfree(fw);
+}
+
+void rpi_firmware_put(struct rpi_firmware *fw)
+{
+ kref_put(&fw->consumers, rpi_firmware_delete);
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_put);
+
+static void devm_rpi_firmware_put(void *data)
+{
+ struct rpi_firmware *fw = data;
+
+ rpi_firmware_put(fw);
+}
+
+static int rpi_firmware_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *fw;
+
+ /*
+ * Memory will be freed by rpi_firmware_delete() once all users have
+ * released their firmware handles. Don't use devm_kzalloc() here.
+ */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fw->cl.dev = dev;
+ fw->cl.rx_callback = response_callback;
+ fw->cl.tx_block = true;
+
+ fw->chan = mbox_request_channel(&fw->cl, 0);
+ if (IS_ERR(fw->chan)) {
+ int ret = PTR_ERR(fw->chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get mbox channel: %d\n", ret);
+ kfree(fw);
+ return ret;
+ }
+
+ init_completion(&fw->c);
+ kref_init(&fw->consumers);
+
+ platform_set_drvdata(pdev, fw);
+
+ rpi_firmware_print_firmware_revision(fw);
+ rpi_register_hwmon_driver(dev, fw);
+ rpi_register_clk_driver(dev);
+
+ return 0;
+}
+
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ if (!fw)
+ return;
+
+ rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
+static int rpi_firmware_remove(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ platform_device_unregister(rpi_hwmon);
+ rpi_hwmon = NULL;
+ platform_device_unregister(rpi_clk);
+ rpi_clk = NULL;
+
+ rpi_firmware_put(fw);
+
+ return 0;
+}
+
+/**
+ * rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * The reference to rpi_firmware has to be released with rpi_firmware_put().
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+ struct platform_device *pdev = of_find_device_by_node(firmware_node);
+ struct rpi_firmware *fw;
+
+ if (!pdev)
+ return NULL;
+
+ fw = platform_get_drvdata(pdev);
+ if (!fw)
+ goto err_put_device;
+
+ if (!kref_get_unless_zero(&fw->consumers))
+ goto err_put_device;
+
+ put_device(&pdev->dev);
+
+ return fw;
+
+err_put_device:
+ put_device(&pdev->dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_get);
+
+/**
+ * devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
+ * @dev: Device requesting the firmware handle; the reference is released
+ *       automatically when this device is unbound.
+ * @firmware_node: Pointer to the firmware Device Tree node.
+ *
+ * Returns NULL if the firmware device is not ready.
+ */
+struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
+ struct device_node *firmware_node)
+{
+ struct rpi_firmware *fw;
+
+ fw = rpi_firmware_get(firmware_node);
+ if (!fw)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
+ return NULL;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
+
+static const struct of_device_id rpi_firmware_of_match[] = {
+ { .compatible = "raspberrypi,bcm2835-firmware", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
+
+static struct platform_driver rpi_firmware_driver = {
+ .driver = {
+ .name = "raspberrypi-firmware",
+ .of_match_table = rpi_firmware_of_match,
+ },
+ .probe = rpi_firmware_probe,
+ .shutdown = rpi_firmware_shutdown,
+ .remove = rpi_firmware_remove,
+};
+module_platform_driver(rpi_firmware_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi firmware driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
new file mode 100644
index 000000000..800673910
--- /dev/null
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SCPI Generic power domain support.
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_pm_domain {
+ struct generic_pm_domain genpd;
+ struct scpi_ops *ops;
+ u32 domain;
+};
+
+/*
+ * These device power state values are not well-defined in the specification.
+ * In case different implementations use different values, we can make these
+ * specific to compatibles rather than getting these values from the device tree.
+ */
+enum scpi_power_domain_state {
+ SCPI_PD_STATE_ON = 0,
+ SCPI_PD_STATE_OFF = 3,
+};
+
+#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
+
+static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
+{
+ int ret;
+ enum scpi_power_domain_state state;
+
+ if (power_on)
+ state = SCPI_PD_STATE_ON;
+ else
+ state = SCPI_PD_STATE_OFF;
+
+ ret = pd->ops->device_set_power_state(pd->domain, state);
+ if (ret)
+ return ret;
+
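+	/* Treat a read-back state that differs from the requested one as a failure. */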
+ return !(state == pd->ops->device_get_power_state(pd->domain));
+}
+
+static int scpi_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, true);
+}
+
+static int scpi_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, false);
+}
+
+static int scpi_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_pm_domain *scpi_pd;
+ struct genpd_onecell_data *scpi_pd_data;
+ struct generic_pm_domain **domains;
+ struct scpi_ops *scpi_ops;
+ int ret, num_domains, i;
+
+ scpi_ops = get_scpi_ops();
+ if (!scpi_ops)
+ return -EPROBE_DEFER;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ if (!scpi_ops->device_set_power_state ||
+ !scpi_ops->device_get_power_state) {
+ dev_err(dev, "power domains not supported in the firmware\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "num-domains", &num_domains);
+ if (ret) {
+ dev_err(dev, "number of domains not found\n");
+ return -EINVAL;
+ }
+
+ scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
+ if (!scpi_pd)
+ return -ENOMEM;
+
+ scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
+ if (!scpi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scpi_pd++) {
+ domains[i] = &scpi_pd->genpd;
+
+ scpi_pd->domain = i;
+ scpi_pd->ops = scpi_ops;
+ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%pOFn.%d", np, i);
+ if (!scpi_pd->genpd.name) {
+ dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+ np, i);
+ continue;
+ }
+ scpi_pd->genpd.power_off = scpi_pd_power_off;
+ scpi_pd->genpd.power_on = scpi_pd_power_on;
+
+ /*
+ * Treat all power domains as off at boot.
+ *
+ * The SCP firmware itself may have switched on some domains,
+ * but for reference counting purpose, keep it this way.
+ */
+ pm_genpd_init(&scpi_pd->genpd, NULL, true);
+ }
+
+ scpi_pd_data->domains = domains;
+ scpi_pd_data->num_domains = num_domains;
+
+ of_genpd_add_provider_onecell(np, scpi_pd_data);
+
+ return 0;
+}
+
+static const struct of_device_id scpi_power_domain_ids[] = {
+ { .compatible = "arm,scpi-power-domains", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
+
+static struct platform_driver scpi_power_domain_driver = {
+ .driver = {
+ .name = "scpi_power_domain",
+ .of_match_table = scpi_power_domain_ids,
+ },
+ .probe = scpi_pm_domain_probe,
+};
+module_platform_driver(scpi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/smccc/Kconfig b/drivers/firmware/smccc/Kconfig
new file mode 100644
index 000000000..15e746617
--- /dev/null
+++ b/drivers/firmware/smccc/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARM_SMCCC
+ bool
+ help
+ Include support for the Secure Monitor Call (SMC) and Hypervisor
+ Call (HVC) instructions on Armv7 and above architectures.
+
+config HAVE_ARM_SMCCC_DISCOVERY
+ bool
+ depends on ARM_PSCI_FW
+ default y
+ help
+ SMCCC v1.0 lacked discoverability and hence PSCI v1.0 was updated
+	  to add an SMCCC discovery mechanism through the PSCI firmware
+	  implementation of PSCI_FEATURES(SMCCC_VERSION), which returns
+	  success on firmware compliant with SMCCC v1.1 and above.
+
+config ARM_SMCCC_SOC_ID
+ bool "SoC bus device for the ARM SMCCC SOC_ID"
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ default y
+ select SOC_BUS
+ help
+	  Include support for the SoC bus on ARM SMCCC firmware-based
+	  platforms, providing some sysfs information about the SoC variant.
diff --git a/drivers/firmware/smccc/Makefile b/drivers/firmware/smccc/Makefile
new file mode 100644
index 000000000..72ab84042
--- /dev/null
+++ b/drivers/firmware/smccc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o
+obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
new file mode 100644
index 000000000..00c88b809
--- /dev/null
+++ b/drivers/firmware/smccc/smccc.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "smccc: " fmt
+
+#include <linux/init.h>
+#include <linux/arm-smccc.h>
+
+static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
+static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
+{
+ smccc_version = version;
+ smccc_conduit = conduit;
+}
+
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
+{
+ if (smccc_version < ARM_SMCCC_VERSION_1_1)
+ return SMCCC_CONDUIT_NONE;
+
+ return smccc_conduit;
+}
+EXPORT_SYMBOL_GPL(arm_smccc_1_1_get_conduit);
+
+u32 arm_smccc_get_version(void)
+{
+ return smccc_version;
+}
+EXPORT_SYMBOL_GPL(arm_smccc_get_version);
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
new file mode 100644
index 000000000..dd7c3d5e8
--- /dev/null
+++ b/drivers/firmware/smccc/soc_id.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Arm Limited
+ */
+
+#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24)
+/*
+ * As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C)
+ * Section 7.4 SMCCC_ARCH_SOC_ID bits[23:16] are JEP-106 identification
+ * code with parity bit for the SiP. We can drop the parity bit.
+ */
+#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16)
+#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0)
+
+#define JEP106_BANK_CONT_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x)))
+#define JEP106_ID_CODE(x) \
+ (u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x)))
+#define IMP_DEF_SOC_ID(x) \
+ (u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x)))
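+
+/*
+ * Illustrative (hypothetical) decode: a SoC ID version word of 0x043b0001
+ * would yield a JEP106 bank continuation code of 0x04, an ID code of 0x3b
+ * and an implementation-defined SoC ID of 0x0001, producing the strings
+ * family = "jep106:043b" and soc_id = "jep106:043b:0001" below.
+ */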
+
+static struct soc_device *soc_dev;
+static struct soc_device_attribute *soc_dev_attr;
+
+static int __init smccc_soc_init(void)
+{
+ struct arm_smccc_res res;
+ int soc_id_rev, soc_id_version;
+ static char soc_id_str[20], soc_id_rev_str[12];
+ static char soc_id_jep106_id_str[12];
+
+ if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE) {
+ pr_err("%s: invalid SMCCC conduit\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_SOC_ID, &res);
+
+ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
+ return 0;
+ }
+
+ if ((int)res.a0 < 0) {
+ pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
+ res.a0);
+ return -EINVAL;
+ }
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_version = res.a0;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
+ if ((int)res.a0 < 0) {
+ pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
+ return -EINVAL;
+ }
+
+ soc_id_rev = res.a0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ sprintf(soc_id_rev_str, "0x%08x", soc_id_rev);
+ sprintf(soc_id_jep106_id_str, "jep106:%02x%02x",
+ JEP106_BANK_CONT_CODE(soc_id_version),
+ JEP106_ID_CODE(soc_id_version));
+ sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str,
+ IMP_DEF_SOC_ID(soc_id_version));
+
+ soc_dev_attr->soc_id = soc_id_str;
+ soc_dev_attr->revision = soc_id_rev_str;
+ soc_dev_attr->family = soc_id_jep106_id_str;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
+ return 0;
+}
+module_init(smccc_soc_init);
+
+static void __exit smccc_soc_exit(void)
+{
+ if (soc_dev)
+ soc_device_unregister(soc_dev);
+ kfree(soc_dev_attr);
+}
+module_exit(smccc_soc_exit);
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 000000000..9378075d0
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Intel Corporation
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+#define RSU_STATE_MASK GENMASK_ULL(31, 0)
+#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
+#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
+#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
+#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
+#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
+
+#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
+
+#define INVALID_RETRY_COUNTER 0xFF
+#define INVALID_DCMF_VERSION 0xFF
+
+
+typedef void (*rsu_callback)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data);
+/**
+ * struct stratix10_rsu_priv - rsu data structure
+ * @chan: pointer to the allocated service channel
+ * @client: active service client
+ * @completion: state for callback completion
+ * @lock: a mutex to protect callback completion state
+ * @status.current_image: address of image currently running in flash
+ * @status.fail_image: address of failed image in flash
+ * @status.version: the interface version number of RSU firmware
+ * @status.state: the state of RSU system
+ * @status.error_details: error code
+ * @status.error_location: the error offset inside the image that failed
+ * @dcmf_version.dcmf0: Quartus dcmf0 version
+ * @dcmf_version.dcmf1: Quartus dcmf1 version
+ * @dcmf_version.dcmf2: Quartus dcmf2 version
+ * @dcmf_version.dcmf3: Quartus dcmf3 version
+ * @retry_counter: the current image's retry counter
+ * @max_retry: the preset max retry value
+ */
+struct stratix10_rsu_priv {
+ struct stratix10_svc_chan *chan;
+ struct stratix10_svc_client client;
+ struct completion completion;
+ struct mutex lock;
+ struct {
+ unsigned long current_image;
+ unsigned long fail_image;
+ unsigned int version;
+ unsigned int state;
+ unsigned int error_details;
+ unsigned int error_location;
+ } status;
+
+ struct {
+ unsigned int dcmf0;
+ unsigned int dcmf1;
+ unsigned int dcmf2;
+ unsigned int dcmf3;
+ } dcmf_version;
+
+ unsigned int retry_counter;
+ unsigned int max_retry;
+};
+
+/**
+ * rsu_status_callback() - Status callback from Intel Service Layer
+ * @client: pointer to service client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU status request. Status is
+ * only updated after a system reboot, so a request for the updated status
+ * is made during driver probe.
+ */
+static void rsu_status_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->status.version = FIELD_GET(RSU_VERSION_MASK,
+ res->a2);
+ priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
+ priv->status.fail_image = res->a1;
+ priv->status.current_image = res->a0;
+ priv->status.error_location =
+ FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
+ priv->status.error_details =
+ FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
+ } else {
+ dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
+ res->a0);
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->status.fail_image = 0;
+ priv->status.current_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ }
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_command_callback() - Update callback from Intel Service Layer
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for RSU commands.
+ */
+static void rsu_command_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_retry_callback() - Callback from Intel service layer for getting
+ * the current image's retry counter from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for the retry counter, which is used by
+ * the user to know how many times the image is still allowed to reload
+ * itself before giving up and starting the RSU fail-over flow.
+ */
+static void rsu_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *counter = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "FW doesn't support retry\n");
+ else
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_max_retry_callback() - Callback from Intel service layer for getting
+ * the max retry value from the firmware
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for max retry.
+ */
+static void rsu_max_retry_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned int *max_retry = (unsigned int *)data->kaddr1;
+
+ if (data->status == BIT(SVC_STATUS_OK))
+ priv->max_retry = *max_retry;
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
+ dev_warn(client->dev, "FW doesn't support max retry\n");
+ else
+ dev_err(client->dev, "Failed to get max retry %lu\n",
+ BIT(data->status));
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_dcmf_version_callback() - Callback from Intel service layer for getting
+ * the DCMF version
+ * @client: pointer to client
+ * @data: pointer to callback data structure
+ *
+ * Callback from Intel service layer for DCMF version number
+ */
+static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long long *value1 = (unsigned long long *)data->kaddr1;
+ unsigned long long *value2 = (unsigned long long *)data->kaddr2;
+
+ if (data->status == BIT(SVC_STATUS_OK)) {
+ priv->dcmf_version.dcmf0 = FIELD_GET(RSU_DCMF0_MASK, *value1);
+ priv->dcmf_version.dcmf1 = FIELD_GET(RSU_DCMF1_MASK, *value1);
+ priv->dcmf_version.dcmf2 = FIELD_GET(RSU_DCMF2_MASK, *value2);
+ priv->dcmf_version.dcmf3 = FIELD_GET(RSU_DCMF3_MASK, *value2);
+ } else
+ dev_err(client->dev, "failed to get DCMF version\n");
+
+ complete(&priv->completion);
+}
+
+/**
+ * rsu_send_msg() - send a message to Intel service layer
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, the bitstream address or notify status
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Start an Intel service layer transaction to perform the SMC call that
+ * is necessary to get RSU boot log or set the address of bitstream to
+ * boot after reboot.
+ *
+ * Returns 0 on success, or a negative error code (such as -ETIMEDOUT) on failure.
+ */
+static int rsu_send_msg(struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_callback callback)
+{
+ struct stratix10_svc_client_msg msg;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ reinit_completion(&priv->completion);
+ priv->client.receive_cb = callback;
+
+ msg.command = command;
+ if (arg)
+ msg.arg[0] = arg;
+
+ ret = stratix10_svc_send(priv->chan, &msg);
+ if (ret < 0)
+ goto status_done;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->completion,
+ RSU_TIMEOUT);
+ if (!ret) {
+ dev_err(priv->client.dev,
+ "timeout waiting for SMC call\n");
+ ret = -ETIMEDOUT;
+ goto status_done;
+ } else if (ret < 0) {
+ dev_err(priv->client.dev,
+ "error %d waiting for SMC call\n", ret);
+ goto status_done;
+ } else {
+ ret = 0;
+ }
+
+status_done:
+ stratix10_svc_done(priv->chan);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
+ * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
+ * related. They allow user space software to query the configuration system
+ * status and to request optional reboot behavior specific to Intel FPGAs.
+ */
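+
+/*
+ * Illustration only, not part of the driver: user space is expected to read
+ * and write these attributes through sysfs. Assuming the child platform
+ * device keeps the default name "stratix10-rsu.0", the paths below are a
+ * plausible (but not guaranteed) example; 0x01000000 is just an example
+ * bitstream address:
+ *
+ *   cat /sys/devices/platform/stratix10-rsu.0/current_image
+ *   echo 0x01000000 > /sys/devices/platform/stratix10-rsu.0/reboot_image
+ *   echo 1 > /sys/devices/platform/stratix10-rsu.0/notify
+ */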
+
+static ssize_t current_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.current_image);
+}
+
+static ssize_t fail_image_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.version);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.state);
+}
+
+static ssize_t error_location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_location);
+}
+
+static ssize_t error_details_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->status.error_details);
+}
+
+static ssize_t retry_counter_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->retry_counter);
+}
+
+static ssize_t max_retry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->max_retry);
+}
+
+static ssize_t dcmf0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf0);
+}
+
+static ssize_t dcmf1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf1);
+}
+
+static ssize_t dcmf2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf2);
+}
+
+static ssize_t dcmf3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
+}
+
+static ssize_t reboot_image_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long address;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &address);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
+ address, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU update returned %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static ssize_t notify_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+ unsigned long status;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ ret = kstrtoul(buf, 0, &status);
+ if (ret)
+ return ret;
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
+ status, rsu_command_callback);
+ if (ret) {
+ dev_err(dev, "Error, RSU notify returned %i\n", ret);
+ return ret;
+ }
+
+ /* to get the updated state */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ return ret;
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(current_image);
+static DEVICE_ATTR_RO(fail_image);
+static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_RO(error_location);
+static DEVICE_ATTR_RO(error_details);
+static DEVICE_ATTR_RO(retry_counter);
+static DEVICE_ATTR_RO(max_retry);
+static DEVICE_ATTR_RO(dcmf0);
+static DEVICE_ATTR_RO(dcmf1);
+static DEVICE_ATTR_RO(dcmf2);
+static DEVICE_ATTR_RO(dcmf3);
+static DEVICE_ATTR_WO(reboot_image);
+static DEVICE_ATTR_WO(notify);
+
+static struct attribute *rsu_attrs[] = {
+ &dev_attr_current_image.attr,
+ &dev_attr_fail_image.attr,
+ &dev_attr_state.attr,
+ &dev_attr_version.attr,
+ &dev_attr_error_location.attr,
+ &dev_attr_error_details.attr,
+ &dev_attr_retry_counter.attr,
+ &dev_attr_max_retry.attr,
+ &dev_attr_dcmf0.attr,
+ &dev_attr_dcmf1.attr,
+ &dev_attr_dcmf2.attr,
+ &dev_attr_dcmf3.attr,
+ &dev_attr_reboot_image.attr,
+ &dev_attr_notify.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rsu);
+
+static int stratix10_rsu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_rsu_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client.dev = dev;
+ priv->client.receive_cb = NULL;
+ priv->client.priv = priv;
+ priv->status.current_image = 0;
+ priv->status.fail_image = 0;
+ priv->status.error_location = 0;
+ priv->status.error_details = 0;
+ priv->status.version = 0;
+ priv->status.state = 0;
+ priv->retry_counter = INVALID_RETRY_COUNTER;
+ priv->dcmf_version.dcmf0 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
+ priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
+ priv->max_retry = INVALID_RETRY_COUNTER;
+
+ mutex_init(&priv->lock);
+ priv->chan = stratix10_svc_request_channel_byname(&priv->client,
+ SVC_CLIENT_RSU);
+ if (IS_ERR(priv->chan)) {
+ dev_err(dev, "couldn't get service channel %s\n",
+ SVC_CLIENT_RSU);
+ return PTR_ERR(priv->chan);
+ }
+
+ init_completion(&priv->completion);
+ platform_set_drvdata(pdev, priv);
+
+ /* get the initial state from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
+ 0, rsu_status_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU status %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ /* get DCMF version from firmware */
+ ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_VERSION,
+ 0, rsu_dcmf_version_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting DCMF version %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
+ rsu_max_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU max retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+
+ return ret;
+}
+
+static int stratix10_rsu_remove(struct platform_device *pdev)
+{
+ struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
+
+ stratix10_svc_free_channel(priv->chan);
+ return 0;
+}
+
+static struct platform_driver stratix10_rsu_driver = {
+ .probe = stratix10_rsu_probe,
+ .remove = stratix10_rsu_remove,
+ .driver = {
+ .name = "stratix10-rsu",
+ .dev_groups = rsu_groups,
+ },
+};
+
+module_platform_driver(stratix10_rsu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Remote System Update Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
new file mode 100644
index 000000000..cd0f12b2b
--- /dev/null
+++ b/drivers/firmware/stratix10-svc.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+#include <linux/firmware/intel/stratix10-svc-client.h>
+#include <linux/types.h>
+
+/**
+ * SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO
+ *
+ * SVC_NUM_CHANNEL - number of channels supported by the service layer driver
+ *
+ * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - timeout for claiming back the submitted
+ * buffer(s) from the secure world, so the FPGA manager can reuse them, or for
+ * freeing the buffer(s) once all bit-stream data has been sent.
+ *
+ * FPGA_CONFIG_STATUS_TIMEOUT_SEC - timeout for polling the FPGA configuration
+ * status; the service layer returns an error to the FPGA manager when the
+ * timeout expires. It is set to 30 seconds on the Intel Stratix10 SoC.
+ */
+#define SVC_NUM_DATA_IN_FIFO 32
+#define SVC_NUM_CHANNEL 2
+#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
+#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
+
+/* stratix10 service layer clients */
+#define STRATIX10_RSU "stratix10-rsu"
+
+typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+struct stratix10_svc_chan;
+
+/**
+ * struct stratix10_svc - svc private data
+ * @stratix10_svc_rsu: pointer to stratix10 RSU device
+ */
+struct stratix10_svc {
+ struct platform_device *stratix10_svc_rsu;
+};
+
+/**
+ * struct stratix10_svc_sh_memory - service shared memory structure
+ * @sync_complete: state for a completion
+ * @addr: physical address of shared memory block
+ * @size: size of shared memory block
+ * @invoke_fn: function to issue secure monitor or hypervisor call
+ *
+ * This struct is used to save the physical address and size of the shared
+ * memory block. The shared memory block is allocated by the secure monitor
+ * software in the secure world.
+ *
+ * The service layer driver uses the physical address and size to create a
+ * memory pool, then allocates data buffers from that memory pool for service
+ * clients.
+ */
+struct stratix10_svc_sh_memory {
+ struct completion sync_complete;
+ unsigned long addr;
+ unsigned long size;
+ svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_data_mem - service memory structure
+ * @vaddr: virtual address
+ * @paddr: physical address
+ * @size: size of memory
+ * @node: link list head node
+ *
+ * This struct is used in a list that keeps track of buffers which have
+ * been allocated or freed from the memory pool. The service layer driver
+ * also uses this struct to translate a physical address to a virtual address.
+ */
+struct stratix10_svc_data_mem {
+ void *vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ struct list_head node;
+};
+
+/**
+ * struct stratix10_svc_data - service data structure
+ * @chan: service channel
+ * @paddr: payload physical address
+ * @size: payload size
+ * @command: service command requested by client
+ * @flag: configuration type (full or partial)
+ * @arg: args to be passed via registers and not physically mapped buffers
+ *
+ * This struct is used in service FIFO for inter-process communication.
+ */
+struct stratix10_svc_data {
+ struct stratix10_svc_chan *chan;
+ phys_addr_t paddr;
+ size_t size;
+ u32 command;
+ u32 flag;
+ u64 arg[3];
+};
+
+/**
+ * struct stratix10_svc_controller - service controller
+ * @dev: device
+ * @chans: array of service channels
+ * @num_chans: number of channels in 'chans' array
+ * @num_active_client: number of active service client
+ * @node: list management
+ * @genpool: memory pool pointing to the memory region
+ * @task: pointer to the thread task which handles SMC or HVC call
+ * @svc_fifo: a queue for storing service message data
+ * @complete_status: state for completion
+ * @svc_fifo_lock: protect access to service message data queue
+ * @invoke_fn: function to issue secure monitor call or hypervisor call
+ *
+ * This struct is used to create communication channels for service clients
+ * and to handle secure monitor or hypervisor calls.
+ */
+struct stratix10_svc_controller {
+ struct device *dev;
+ struct stratix10_svc_chan *chans;
+ int num_chans;
+ int num_active_client;
+ struct list_head node;
+ struct gen_pool *genpool;
+ struct task_struct *task;
+ struct kfifo svc_fifo;
+ struct completion complete_status;
+ spinlock_t svc_fifo_lock;
+ svc_invoke_fn *invoke_fn;
+};
+
+/**
+ * struct stratix10_svc_chan - service communication channel
+ * @ctrl: pointer to service controller which is the provider of this channel
+ * @scl: pointer to service client which owns the channel
+ * @name: service client name associated with the channel
+ * @lock: protect access to the channel
+ *
+ * This struct is used by a service client to communicate with the service
+ * layer; each service client has its own channel created by the service
+ * controller.
+ */
+struct stratix10_svc_chan {
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_svc_client *scl;
+ char *name;
+ spinlock_t lock;
+};
+
+static LIST_HEAD(svc_ctrl);
+static LIST_HEAD(svc_data_mem);
+
+/**
+ * svc_pa_to_va() - translate physical address to virtual address
+ * @addr: to be translated physical address
+ *
+ * Return: valid virtual address or NULL if the provided physical
+ * address doesn't exist.
+ */
+static void *svc_pa_to_va(unsigned long addr)
+{
+ struct stratix10_svc_data_mem *pmem;
+
+ pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
+ list_for_each_entry(pmem, &svc_data_mem, node)
+ if (pmem->paddr == addr)
+ return pmem->vaddr;
+
+ /* physical address is not found */
+ return NULL;
+}
+
+/**
+ * svc_thread_cmd_data_claim() - claim back buffer from the secure world
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Claim back the submitted buffers from the secure world and pass them back
+ * to the service client (FPGA manager, etc.) for reuse.
+ */
+static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
+ struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data)
+{
+ struct arm_smccc_res res;
+ unsigned long timeout;
+
+ reinit_completion(&ctrl->complete_status);
+ timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS);
+
+ pr_debug("%s: claim back the submitted buffer\n", __func__);
+ do {
+ ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ if (!res.a1) {
+ complete(&ctrl->complete_status);
+ break;
+ }
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
+ cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->kaddr2 = (res.a2) ?
+ svc_pa_to_va(res.a2) : NULL;
+ cb_data->kaddr3 = (res.a3) ?
+ svc_pa_to_va(res.a3) : NULL;
+ p_data->chan->scl->receive_cb(p_data->chan->scl,
+ cb_data);
+ } else {
+ pr_debug("%s: secure world busy, polling again\n",
+ __func__);
+ }
+ } while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
+ wait_for_completion_timeout(&ctrl->complete_status, timeout));
+}
+
+/**
+ * svc_thread_cmd_config_status() - check configuration status
+ * @ctrl: pointer to service layer controller
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ *
+ * Check whether the secure firmware in the secure world has finished the FPGA
+ * configuration, then inform the FPGA manager of the configuration status.
+ */
+static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
+ struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data)
+{
+ struct arm_smccc_res res;
+ int count_in_sec;
+
+ cb_data->kaddr1 = NULL;
+ cb_data->kaddr2 = NULL;
+ cb_data->kaddr3 = NULL;
+ cb_data->status = BIT(SVC_STATUS_ERROR);
+
+ pr_debug("%s: polling config status\n", __func__);
+
+ count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
+ while (count_in_sec) {
+ ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
+ break;
+
+ /*
+ * configuration is still in progress, wait one second then
+ * poll again
+ */
+ msleep(1000);
+ count_in_sec--;
+ }
+
+ if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
+
+ p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_thread_recv_status_ok() - handle the successful status
+ * @p_data: pointer to service data structure
+ * @cb_data: pointer to callback data structure to service client
+ * @res: result from SMC or HVC call
+ *
+ * Send the corresponding status back to the service client.
+ */
+static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
+ struct stratix10_svc_cb_data *cb_data,
+ struct arm_smccc_res res)
+{
+ cb_data->kaddr1 = NULL;
+ cb_data->kaddr2 = NULL;
+ cb_data->kaddr3 = NULL;
+
+ switch (p_data->command) {
+ case COMMAND_RECONFIG:
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ break;
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
+ break;
+ case COMMAND_RSU_RETRY:
+ case COMMAND_RSU_MAX_RETRY:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ cb_data->kaddr2 = &res.a2;
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+
+ pr_debug("%s: call receive_cb\n", __func__);
+ p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
+}
+
+/**
+ * svc_normal_to_secure_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates this kthread, bound to CPU node 0, to
+ * handle SMC or HVC calls between the kernel driver and the secure monitor
+ * software.
+ *
+ * Return: 0 for success or -ENOMEM on error.
+ */
+static int svc_normal_to_secure_thread(void *data)
+{
+ struct stratix10_svc_controller
+ *ctrl = (struct stratix10_svc_controller *)data;
+ struct stratix10_svc_data *pdata;
+ struct stratix10_svc_cb_data *cbdata;
+ struct arm_smccc_res res;
+ unsigned long a0, a1, a2;
+ int ret_fifo = 0;
+
+ pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL);
+ if (!cbdata) {
+ kfree(pdata);
+ return -ENOMEM;
+ }
+
+ /* default set, to remove build warning */
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
+ a1 = 0;
+ a2 = 0;
+
+ pr_debug("smc_hvc_shm_thread is running\n");
+
+ while (!kthread_should_stop()) {
+ ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
+ pdata, sizeof(*pdata),
+ &ctrl->svc_fifo_lock);
+
+ if (!ret_fifo)
+ continue;
+
+ pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n",
+ (unsigned int)pdata->paddr, pdata->command,
+ (unsigned int)pdata->size);
+
+ switch (pdata->command) {
+ case COMMAND_RECONFIG_DATA_CLAIM:
+ svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
+ continue;
+ case COMMAND_RECONFIG:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
+ pr_debug("conf_type=%u\n", (unsigned int)pdata->flag);
+ a1 = pdata->flag;
+ a2 = 0;
+ break;
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
+ a1 = (unsigned long)pdata->paddr;
+ a2 = (unsigned long)pdata->size;
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_STATUS:
+ a0 = INTEL_SIP_SMC_RSU_STATUS;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_UPDATE:
+ a0 = INTEL_SIP_SMC_RSU_UPDATE;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_NOTIFY:
+ a0 = INTEL_SIP_SMC_RSU_NOTIFY;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_RSU_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_MAX_RETRY:
+ a0 = INTEL_SIP_SMC_RSU_MAX_RETRY;
+ a1 = 0;
+ a2 = 0;
+ break;
+ case COMMAND_RSU_DCMF_VERSION:
+ a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION;
+ a1 = 0;
+ a2 = 0;
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+ pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
+ __func__, (unsigned int)a0, (unsigned int)a1);
+ pr_debug(" a2=0x%016x\n", (unsigned int)a2);
+
+ ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
+
+ pr_debug("%s: after SMC call -- res.a0=0x%016x",
+ __func__, (unsigned int)res.a0);
+ pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
+ (unsigned int)res.a1, (unsigned int)res.a2);
+ pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
+
+ if (pdata->command == COMMAND_RSU_STATUS) {
+ if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
+ cbdata->status = BIT(SVC_STATUS_ERROR);
+ else
+ cbdata->status = BIT(SVC_STATUS_OK);
+
+ cbdata->kaddr1 = &res;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+ continue;
+ }
+
+ switch (res.a0) {
+ case INTEL_SIP_SMC_STATUS_OK:
+ svc_thread_recv_status_ok(pdata, cbdata, res);
+ break;
+ case INTEL_SIP_SMC_STATUS_BUSY:
+ switch (pdata->command) {
+ case COMMAND_RECONFIG_DATA_SUBMIT:
+ svc_thread_cmd_data_claim(ctrl,
+ pdata, cbdata);
+ break;
+ case COMMAND_RECONFIG_STATUS:
+ svc_thread_cmd_config_status(ctrl,
+ pdata, cbdata);
+ break;
+ default:
+ pr_warn("it shouldn't happen\n");
+ break;
+ }
+ break;
+ case INTEL_SIP_SMC_STATUS_REJECTED:
+ pr_debug("%s: STATUS_REJECTED\n", __func__);
+ break;
+ case INTEL_SIP_SMC_STATUS_ERROR:
+ case INTEL_SIP_SMC_RSU_ERROR:
+ pr_err("%s: STATUS_ERROR\n", __func__);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
+ cbdata->kaddr1 = &res.a1;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+ break;
+ default:
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+ * be compatible with older version firmware which
+ * doesn't support RSU notify or retry
+ */
+ if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_MAX_RETRY) ||
+ (pdata->command == COMMAND_RSU_NOTIFY)) {
+ cbdata->status =
+ BIT(SVC_STATUS_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
+ break;
+
+ }
+ }
+
+ kfree(cbdata);
+ kfree(pdata);
+
+ return 0;
+}
+
+/**
+ * svc_normal_to_secure_shm_thread() - the function to run in the kthread
+ * @data: data pointer for kthread function
+ *
+ * The service layer driver creates this kthread, bound to CPU node 0, to
+ * query the physical address of the memory block reserved by the secure
+ * monitor software in the secure world.
+ *
+ * svc_normal_to_secure_shm_thread() calls do_exit() directly since it is a
+ * standalone thread for which no one will call kthread_stop() or return
+ * when 'kthread_should_stop()' is true.
+ */
+static int svc_normal_to_secure_shm_thread(void *data)
+{
+ struct stratix10_svc_sh_memory
+ *sh_mem = (struct stratix10_svc_sh_memory *)data;
+ struct arm_smccc_res res;
+
+ /* SMC or HVC call to get shared memory info from secure world */
+ sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ sh_mem->addr = res.a1;
+ sh_mem->size = res.a2;
+ } else {
+ pr_err("%s: after SMC call -- res.a0=0x%016x", __func__,
+ (unsigned int)res.a0);
+ sh_mem->addr = 0;
+ sh_mem->size = 0;
+ }
+
+ complete(&sh_mem->sync_complete);
+ do_exit(0);
+}
+
+/**
+ * svc_get_sh_memory() - get memory block reserved by secure monitor SW
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: zero on successfully getting the physical address of the memory
+ * block reserved by the secure monitor software, or a negative value on error.
+ */
+static int svc_get_sh_memory(struct platform_device *pdev,
+ struct stratix10_svc_sh_memory *sh_memory)
+{
+ struct device *dev = &pdev->dev;
+ struct task_struct *sh_memory_task;
+ unsigned int cpu = 0;
+
+ init_completion(&sh_memory->sync_complete);
+
+ /* smc or hvc call happens on cpu 0 bound kthread */
+ sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread,
+ (void *)sh_memory,
+ cpu_to_node(cpu),
+ "svc_smc_hvc_shm_thread");
+ if (IS_ERR(sh_memory_task)) {
+ dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n");
+ return -EINVAL;
+ }
+
+ wake_up_process(sh_memory_task);
+
+ if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
+ dev_err(dev,
+ "timeout to get sh-memory paras from secure world\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!sh_memory->addr || !sh_memory->size) {
+ dev_err(dev,
+ "failed to get shared memory info from secure world\n");
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n",
+ (unsigned int)sh_memory->addr,
+ (unsigned int)sh_memory->size);
+
+ return 0;
+}
+
+/**
+ * svc_create_memory_pool() - create a memory pool from reserved memory block
+ * @pdev: pointer to service layer device
+ * @sh_memory: pointer to service shared memory structure
+ *
+ * Return: pool allocated from reserved memory block or ERR_PTR() on error.
+ */
+static struct gen_pool *
+svc_create_memory_pool(struct platform_device *pdev,
+ struct stratix10_svc_sh_memory *sh_memory)
+{
+ struct device *dev = &pdev->dev;
+ struct gen_pool *genpool;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ size_t page_mask = PAGE_SIZE - 1;
+ int min_alloc_order = 3;
+ int ret;
+
+ begin = roundup(sh_memory->addr, PAGE_SIZE);
+ end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+ va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
+ if (IS_ERR(va)) {
+ dev_err(dev, "fail to remap shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+ dev_dbg(dev,
+ "reserved memory vaddr: %p, paddr: 0x%16x size: 0x%8x\n",
+ va, (unsigned int)paddr, (unsigned int)size);
+ if ((vaddr & page_mask) || (paddr & page_mask) ||
+ (size & page_mask)) {
+ dev_err(dev, "page is not aligned\n");
+ return ERR_PTR(-EINVAL);
+ }
+ genpool = gen_pool_create(min_alloc_order, -1);
+ if (!genpool) {
+ dev_err(dev, "fail to create genpool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
+ ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
+ if (ret) {
+ dev_err(dev, "fail to add memory chunk to the pool\n");
+ gen_pool_destroy(genpool);
+ return ERR_PTR(ret);
+ }
+
+ return genpool;
+}
+
+/**
+ * svc_smccc_smc() - secure monitor call between normal and secure world
+ * @a0: argument passed in registers 0
+ * @a1: argument passed in registers 1
+ * @a2: argument passed in registers 2
+ * @a3: argument passed in registers 3
+ * @a4: argument passed in registers 4
+ * @a5: argument passed in registers 5
+ * @a6: argument passed in registers 6
+ * @a7: argument passed in registers 7
+ * @res: result values from register 0 to 3
+ */
+static void svc_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * svc_smccc_hvc() - hypervisor call between normal and secure world
+ * @a0: argument passed in registers 0
+ * @a1: argument passed in registers 1
+ * @a2: argument passed in registers 2
+ * @a3: argument passed in registers 3
+ * @a4: argument passed in registers 4
+ * @a5: argument passed in registers 5
+ * @a6: argument passed in registers 6
+ * @a7: argument passed in registers 7
+ * @res: result values from register 0 to 3
+ */
+static void svc_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+/**
+ * get_invoke_func() - look up the SMC or HVC invocation function
+ * @dev: pointer to device
+ *
+ * Return: function pointer to svc_smccc_smc or svc_smccc_hvc.
+ */
+static svc_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ if (of_property_read_string(dev->of_node, "method", &method)) {
+ dev_warn(dev, "missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp(method, "smc"))
+ return svc_smccc_smc;
+ if (!strcmp(method, "hvc"))
+ return svc_smccc_hvc;
+
+ dev_warn(dev, "invalid \"method\" property: %s\n", method);
+
+ return ERR_PTR(-EINVAL);
+}
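+
+/*
+ * For illustration (a sketch, not taken verbatim from a DT binding document):
+ * the firmware node matched by this driver is expected to carry a "method"
+ * property selecting the conduit, e.g.:
+ *
+ *	firmware {
+ *		svc {
+ *			compatible = "intel,stratix10-svc";
+ *			method = "smc";
+ *		};
+ *	};
+ */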
+
+/**
+ * stratix10_svc_request_channel_byname() - request a service channel
+ * @client: pointer to service client
+ * @name: service client name
+ *
+ * This function is used by service client to request a service channel.
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
+ struct stratix10_svc_client *client, const char *name)
+{
+ struct device *dev = client->dev;
+ struct stratix10_svc_controller *controller;
+ struct stratix10_svc_chan *chan = NULL;
+ unsigned long flag;
+ int i;
+
+	/* the controller's probe has not run yet (or failed), so ask the client to defer */
+ if (list_empty(&svc_ctrl))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ controller = list_first_entry(&svc_ctrl,
+ struct stratix10_svc_controller, node);
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ if (!strcmp(controller->chans[i].name, name)) {
+ chan = &controller->chans[i];
+ break;
+ }
+ }
+
+ /* if there was no channel match */
+ if (i == SVC_NUM_CHANNEL) {
+ dev_err(dev, "%s: channel not allocated\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (chan->scl || !try_module_get(controller->dev->driver->owner)) {
+ dev_dbg(dev, "%s: svc not free\n", __func__);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flag);
+ chan->scl = client;
+ chan->ctrl->num_active_client++;
+ spin_unlock_irqrestore(&chan->lock, flag);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
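+
+/*
+ * Illustrative sketch only (hypothetical client code, not part of this file):
+ * a service client fills in a struct stratix10_svc_client with its device,
+ * receive callback and private data, requests a channel by name, and frees
+ * the channel when it no longer needs the service. The names my_receive_cb
+ * and my_priv are made up for the example.
+ *
+ *	client->dev = dev;
+ *	client->receive_cb = my_receive_cb;
+ *	client->priv = my_priv;
+ *
+ *	chan = stratix10_svc_request_channel_byname(client, SVC_CLIENT_RSU);
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *
+ *	...use stratix10_svc_send() / stratix10_svc_done()...
+ *
+ *	stratix10_svc_free_channel(chan);
+ */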
+
+/**
+ * stratix10_svc_free_channel() - free service channel
+ * @chan: service channel to be freed
+ *
+ * This function is used by service client to free a service channel.
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&chan->lock, flag);
+ chan->scl = NULL;
+ chan->ctrl->num_active_client--;
+ module_put(chan->ctrl->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flag);
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_channel);
+
+/**
+ * stratix10_svc_send() - send a message data to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * "struct stratix10_svc_client_msg"
+ *
+ * This function is used by a service client to add a message to the service
+ * layer driver's queue so it can be sent to the secure world.
+ *
+ * Return: 0 on success, or a negative error code (-ENOMEM, -EINVAL or
+ * -ENOBUFS) on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
+{
+ struct stratix10_svc_client_msg
+ *p_msg = (struct stratix10_svc_client_msg *)msg;
+ struct stratix10_svc_data_mem *p_mem;
+ struct stratix10_svc_data *p_data;
+ int ret = 0;
+ unsigned int cpu = 0;
+
+ p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
+ if (!p_data)
+ return -ENOMEM;
+
+ /* first client will create kernel thread */
+ if (!chan->ctrl->task) {
+ chan->ctrl->task =
+ kthread_create_on_node(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu_to_node(cpu),
+ "svc_smc_hvc_thread");
+ if (IS_ERR(chan->ctrl->task)) {
+ dev_err(chan->ctrl->dev,
+ "failed to create svc_smc_hvc_thread\n");
+ kfree(p_data);
+ return -EINVAL;
+ }
+ kthread_bind(chan->ctrl->task, cpu);
+ wake_up_process(chan->ctrl->task);
+ }
+
+ pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
+ p_msg->payload, p_msg->command,
+ (unsigned int)p_msg->payload_length);
+
+ if (list_empty(&svc_data_mem)) {
+ if (p_msg->command == COMMAND_RECONFIG) {
+ struct stratix10_svc_command_config_type *ct =
+ (struct stratix10_svc_command_config_type *)
+ p_msg->payload;
+ p_data->flag = ct->flags;
+ }
+ } else {
+ list_for_each_entry(p_mem, &svc_data_mem, node)
+ if (p_mem->vaddr == p_msg->payload) {
+ p_data->paddr = p_mem->paddr;
+ break;
+ }
+ }
+
+ p_data->command = p_msg->command;
+ p_data->arg[0] = p_msg->arg[0];
+ p_data->arg[1] = p_msg->arg[1];
+ p_data->arg[2] = p_msg->arg[2];
+ p_data->size = p_msg->payload_length;
+ p_data->chan = chan;
+ pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
+ (unsigned int)p_data->paddr, p_data->command,
+ (unsigned int)p_data->size);
+ ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
+ sizeof(*p_data),
+ &chan->ctrl->svc_fifo_lock);
+
+ kfree(p_data);
+
+ if (!ret)
+ return -ENOBUFS;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_send);
+
+/**
+ * stratix10_svc_done() - complete service request transactions
+ * @chan: service channel assigned to the client
+ *
+ * This function should be called when the client has finished its request
+ * or when an error occurs during the request. It allows the service layer
+ * to stop the running thread and maximize the savings in kernel resources.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan)
+{
+ /* stop thread when thread is running AND only one active client */
+ if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
+ pr_debug("svc_smc_hvc_shm_thread is stopped\n");
+ kthread_stop(chan->ctrl->task);
+ chan->ctrl->task = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_done);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: memory size requested by a specific service client
+ *
+ * The service layer allocates a buffer of the requested size from the memory
+ * pool; service clients use this function to obtain allocated buffers.
+ *
+ * Return: address of allocated memory on success, or ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+ size_t size)
+{
+ struct stratix10_svc_data_mem *pmem;
+ unsigned long va;
+ phys_addr_t pa;
+ struct gen_pool *genpool = chan->ctrl->genpool;
+ size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+ pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL);
+ if (!pmem)
+ return ERR_PTR(-ENOMEM);
+
+ va = gen_pool_alloc(genpool, s);
+ if (!va)
+ return ERR_PTR(-ENOMEM);
+
+ memset((void *)va, 0, s);
+ pa = gen_pool_virt_to_phys(genpool, va);
+
+ pmem->vaddr = (void *)va;
+ pmem->paddr = pa;
+ pmem->size = s;
+ list_add_tail(&pmem->node, &svc_data_mem);
+ pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
+ pmem->vaddr, (unsigned int)pmem->paddr);
+
+ return (void *)va;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: memory to be freed
+ *
+ * This function is used by service client to free allocated buffers.
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
+{
+ struct stratix10_svc_data_mem *pmem;
+
+ list_for_each_entry(pmem, &svc_data_mem, node)
+ if (pmem->vaddr == kaddr) {
+ gen_pool_free(chan->ctrl->genpool,
+ (unsigned long)kaddr, pmem->size);
+ pmem->vaddr = NULL;
+ list_del(&pmem->node);
+ return;
+ }
+
+ list_del(&svc_data_mem);
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
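+
+/*
+ * Illustrative sketch only: the intended allocate/send/free flow for a
+ * buffer-passing client such as an FPGA reconfiguration client. Everything
+ * except the exported stratix10_svc_* helpers and the COMMAND_* / message
+ * field names used elsewhere in this file is hypothetical.
+ *
+ *	void *buf = stratix10_svc_allocate_memory(chan, size);
+ *
+ *	if (!IS_ERR(buf)) {
+ *		struct stratix10_svc_client_msg msg = {
+ *			.command = COMMAND_RECONFIG_DATA_SUBMIT,
+ *			.payload = buf,
+ *			.payload_length = size,
+ *		};
+ *
+ *		memcpy(buf, bitstream, size);
+ *		stratix10_svc_send(chan, &msg);
+ *		(wait for SVC_STATUS_BUFFER_DONE in the receive callback)
+ *		stratix10_svc_free_memory(chan, buf);
+ *		stratix10_svc_done(chan);
+ *	}
+ */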
+
+static const struct of_device_id stratix10_svc_drv_match[] = {
+ {.compatible = "intel,stratix10-svc"},
+ {.compatible = "intel,agilex-svc"},
+ {},
+};
+
+static int stratix10_svc_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stratix10_svc_controller *controller;
+ struct stratix10_svc_chan *chans;
+ struct gen_pool *genpool;
+ struct stratix10_svc_sh_memory *sh_memory;
+ struct stratix10_svc *svc;
+
+ svc_invoke_fn *invoke_fn;
+ size_t fifo_size;
+ int ret;
+
+ /* get SMC or HVC function */
+ invoke_fn = get_invoke_func(dev);
+ if (IS_ERR(invoke_fn))
+ return -EINVAL;
+
+ sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL);
+ if (!sh_memory)
+ return -ENOMEM;
+
+ sh_memory->invoke_fn = invoke_fn;
+ ret = svc_get_sh_memory(pdev, sh_memory);
+ if (ret)
+ return ret;
+
+ genpool = svc_create_memory_pool(pdev, sh_memory);
+ if (IS_ERR(genpool))
+ return PTR_ERR(genpool);
+
+ /* allocate service controller and supporting channel */
+ controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ return -ENOMEM;
+
+ chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
+ sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
+ if (!chans)
+ return -ENOMEM;
+
+ controller->dev = dev;
+ controller->num_chans = SVC_NUM_CHANNEL;
+ controller->num_active_client = 0;
+ controller->chans = chans;
+ controller->genpool = genpool;
+ controller->task = NULL;
+ controller->invoke_fn = invoke_fn;
+ init_completion(&controller->complete_status);
+
+ fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
+ ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "failed to allocate FIFO\n");
+ return ret;
+ }
+ spin_lock_init(&controller->svc_fifo_lock);
+
+ chans[0].scl = NULL;
+ chans[0].ctrl = controller;
+ chans[0].name = SVC_CLIENT_FPGA;
+ spin_lock_init(&chans[0].lock);
+
+ chans[1].scl = NULL;
+ chans[1].ctrl = controller;
+ chans[1].name = SVC_CLIENT_RSU;
+ spin_lock_init(&chans[1].lock);
+
+ list_add_tail(&controller->node, &svc_ctrl);
+ platform_set_drvdata(pdev, controller);
+
+ /* add svc client device(s) */
+ svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
+ if (!svc) {
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+
+ svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
+ if (!svc->stratix10_svc_rsu) {
+ dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+
+ ret = platform_device_add(svc->stratix10_svc_rsu);
+ if (ret)
+ goto err_put_device;
+
+ dev_set_drvdata(dev, svc);
+
+ pr_info("Intel Service Layer Driver Initialized\n");
+
+ return 0;
+
+err_put_device:
+ platform_device_put(svc->stratix10_svc_rsu);
+err_free_kfifo:
+ kfifo_free(&controller->svc_fifo);
+ return ret;
+}
+
+static int stratix10_svc_drv_remove(struct platform_device *pdev)
+{
+ struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
+ struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+
+ platform_device_unregister(svc->stratix10_svc_rsu);
+
+ kfifo_free(&ctrl->svc_fifo);
+ if (ctrl->task) {
+ kthread_stop(ctrl->task);
+ ctrl->task = NULL;
+ }
+ if (ctrl->genpool)
+ gen_pool_destroy(ctrl->genpool);
+ list_del(&ctrl->node);
+
+ return 0;
+}
+
+static struct platform_driver stratix10_svc_driver = {
+ .probe = stratix10_svc_drv_probe,
+ .remove = stratix10_svc_drv_remove,
+ .driver = {
+ .name = "stratix10-svc",
+ .of_match_table = stratix10_svc_drv_match,
+ },
+};
+
+static int __init stratix10_svc_init(void)
+{
+ struct device_node *fw_np;
+ struct device_node *np;
+ int ret;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, stratix10_svc_drv_match);
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
+ ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&stratix10_svc_driver);
+}
+
+static void __exit stratix10_svc_exit(void)
+{
+ return platform_driver_unregister(&stratix10_svc_driver);
+}
+
+subsys_initcall(stratix10_svc_init);
+module_exit(stratix10_svc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver");
+MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
+MODULE_ALIAS("platform:stratix10-svc");
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 000000000..1c8ba1f47
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+	  data and the different communication channels in SysRAM or RAM and
+	  keeps the content synchronized between the host CPU and remote
+	  processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ help
+	  BPMP (Boot and Power Management Processor) is designed to off-load
+	  PM functions, including clock/DVFS/thermal/power management, from the
+	  CPU. It needs HSP as the HW synchronization and notification module
+	  and the IVC module as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
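+
+# Illustration only: on a Tegra platform, a defconfig fragment enabling both
+# of the options above would typically contain:
+#
+#   CONFIG_TEGRA_IVC=y
+#   CONFIG_TEGRA_BPMP=y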
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 000000000..620cf3fdd
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_234_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
+obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
new file mode 100644
index 000000000..fad97ec8e
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static DEFINE_MUTEX(bpmp_debug_lock);
+
+struct seqbuf {
+ char *buf;
+ size_t pos;
+ size_t size;
+};
+
+static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
+{
+ seqbuf->buf = buf;
+ seqbuf->size = size;
+ seqbuf->pos = 0;
+}
+
+static size_t seqbuf_avail(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
+}
+
+static size_t seqbuf_status(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
+}
+
+static int seqbuf_eof(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos >= seqbuf->size;
+}
+
+static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
+{
+ nbyte = min(nbyte, seqbuf_avail(seqbuf));
+ memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
+ seqbuf->pos += nbyte;
+ return seqbuf_status(seqbuf);
+}
+
+static int seqbuf_read_u32(struct seqbuf *seqbuf, uint32_t *v)
+{
+ int err;
+
+ err = seqbuf_read(seqbuf, v, 4);
+ *v = le32_to_cpu(*v);
+ return err;
+}
+
+static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
+{
+ *str = seqbuf->buf + seqbuf->pos;
+ seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
+ seqbuf->pos++;
+ return seqbuf_status(seqbuf);
+}
+
+static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
+{
+ seqbuf->pos += offset;
+}
+
+/* map filename in Linux debugfs to corresponding entry in BPMP */
+static const char *get_filename(struct tegra_bpmp *bpmp,
+ const struct file *file, char *buf, int size)
+{
+ char root_path_buf[512];
+ const char *root_path;
+ const char *filename;
+ size_t root_len;
+
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ sizeof(root_path_buf));
+ if (IS_ERR(root_path))
+ return NULL;
+
+ root_len = strlen(root_path);
+
+ filename = dentry_path(file->f_path.dentry, buf, size);
+ if (IS_ERR(filename))
+ return NULL;
+
+ if (strlen(filename) < root_len ||
+ strncmp(filename, root_path, root_len))
+ return NULL;
+
+ filename += root_len;
+
+ return filename;
+}
+
+static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
+ uint32_t *fd, uint32_t *len, bool write)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ ssize_t sz_name;
+ int err = 0;
+
+ sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
+ if (sz_name < 0) {
+ pr_err("File name too large: %s\n", name);
+ return -EINVAL;
+ }
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *len = resp.fop.datalen;
+ *fd = resp.fop.fd;
+
+ return 0;
+}
+
+static int mrq_debug_close(struct tegra_bpmp *bpmp, uint32_t fd)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_CLOSE),
+ .frd = {
+ .fd = fd,
+ },
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err = 0;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
+ char *data, size_t sz_data, uint32_t *nbytes)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_READ),
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ int remaining, err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 0);
+ if (err)
+ goto out;
+
+ if (len > sz_data) {
+ err = -EFBIG;
+ goto close;
+ }
+
+ req.frd.fd = fd;
+ remaining = len;
+
+ while (remaining > 0) {
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ if (resp.frd.readlen > remaining) {
+ pr_err("%s: read data length invalid\n", __func__);
+ err = -EINVAL;
+ goto close;
+ }
+
+ memcpy(data, resp.frd.data, resp.frd.readlen);
+ data += resp.frd.readlen;
+ remaining -= resp.frd.readlen;
+ }
+
+ *nbytes = len;
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
+ uint8_t *data, size_t sz_data)
+{
+ struct mrq_debug_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUG_WRITE)
+ };
+ struct mrq_debug_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUG,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ uint32_t fd = 0, len = 0;
+ size_t remaining;
+ int err;
+
+ mutex_lock(&bpmp_debug_lock);
+ err = mrq_debug_open(bpmp, name, &fd, &len, 1);
+ if (err)
+ goto out;
+
+ if (sz_data > len) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ req.fwr.fd = fd;
+ remaining = sz_data;
+
+ while (remaining > 0) {
+ len = min(remaining, sizeof(req.fwr.data));
+ memcpy(req.fwr.data, data, len);
+ req.fwr.datalen = len;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0) {
+ goto close;
+ } else if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ goto close;
+ }
+
+ data += req.fwr.datalen;
+ remaining -= req.fwr.datalen;
+ }
+
+close:
+ err = mrq_debug_close(bpmp, fd);
+out:
+ mutex_unlock(&bpmp_debug_lock);
+ return err;
+}
+
+static int bpmp_debug_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ uint32_t nbytes = 0;
+ size_t len;
+ int err;
+
+ len = seq_get_buf(m, &databuf);
+ if (!databuf)
+ return -ENOMEM;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes);
+ if (!err)
+ seq_commit(m, nbytes);
+
+ return err;
+}
+
+static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ char *databuf = NULL;
+ char fnamebuf[256];
+ const char *filename;
+ ssize_t err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ databuf = kmalloc(count, GFP_KERNEL);
+ if (!databuf)
+ return -ENOMEM;
+
+ if (copy_from_user(databuf, buf, count)) {
+ err = -EFAULT;
+ goto free_ret;
+ }
+
+ err = mrq_debug_write(bpmp, filename, databuf, count);
+
+free_ret:
+ kfree(databuf);
+
+ return err ?: count;
+}
+
+static int bpmp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, bpmp_debug_show, file, SZ_256K);
+}
+
+static const struct file_operations bpmp_debug_fops = {
+ .open = bpmp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = bpmp_debug_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
+ struct dentry *parent,
+ char *ppath)
+{
+ const size_t pathlen = SZ_256;
+ const size_t bufsize = SZ_16K;
+ uint32_t dsize, attrs = 0;
+ struct dentry *dentry;
+ struct seqbuf seqbuf;
+ char *buf, *pathbuf;
+ const char *name;
+ int err = 0;
+
+ if (!bpmp || !parent || !ppath)
+ return -EINVAL;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pathbuf = kzalloc(pathlen, GFP_KERNEL);
+ if (!pathbuf) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
+ if (err)
+ goto out;
+
+ seqbuf_init(&seqbuf, buf, dsize);
+
+ while (!seqbuf_eof(&seqbuf)) {
+ err = seqbuf_read_u32(&seqbuf, &attrs);
+ if (err)
+ goto out;
+
+ err = seqbuf_read_str(&seqbuf, &name);
+ if (err < 0)
+ goto out;
+
+ if (attrs & DEBUGFS_S_ISDIR) {
+ size_t len;
+
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+
+ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
+ if (len >= pathlen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = bpmp_populate_debugfs_inband(bpmp, dentry,
+ pathbuf);
+ if (err < 0)
+ goto out;
+ } else {
+ umode_t mode;
+
+ mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
+ mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
+ dentry = debugfs_create_file(name, mode, parent, bpmp,
+ &bpmp_debug_fops);
+ if (IS_ERR(dentry)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(pathbuf);
+ kfree(buf);
+
+ return err;
+}
+
+static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data,
+ size_t *nbytes)
+{
+ struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_READ),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *nbytes = (size_t)resp.fop.nbytes;
+
+ return 0;
+}
+
+static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_WRITE),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ };
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
+ size_t size, size_t *nbytes)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_DUMPDIR),
+ .dumpdir = {
+ .dataaddr = cpu_to_le32((uint32_t)addr),
+ .datalen = cpu_to_le32((uint32_t)size),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ *nbytes = (size_t)resp.dumpdir.nbytes;
+
+ return 0;
+}
+
+static int debugfs_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = m->size;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char buf[256];
+ const char *filename;
+ size_t len, nbytes;
+ int err;
+
+ filename = get_filename(bpmp, file, buf, sizeof(buf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ err = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ &nbytes);
+
+ if (!err)
+ seq_write(m, datavirt, nbytes);
+
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return err;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, debugfs_show, file, SZ_128K);
+}
+
+static ssize_t debugfs_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = count;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char fnamebuf[256];
+ const char *filename;
+ size_t len;
+ int err;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ err = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ if (copy_from_user(datavirt, buf, count)) {
+ err = -EFAULT;
+ goto free_databuf;
+ }
+
+ err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ count);
+
+free_databuf:
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return err ?: count;
+}
+
+static const struct file_operations debugfs_fops = {
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = debugfs_store,
+ .release = single_release,
+};
+
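+/*
+ * Recursively recreate the directory tree dumped by the firmware. Each entry
+ * in the sequential buffer consists of a depth, an attribute word and a
+ * name: directories recurse one level deeper, while regular entries become
+ * files backed by debugfs_fops.
+ */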
+static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
+ struct dentry *parent, uint32_t depth)
+{
+ int err;
+ uint32_t d, t;
+ const char *name;
+ struct dentry *dentry;
+
+ while (!seqbuf_eof(seqbuf)) {
+ err = seqbuf_read_u32(seqbuf, &d);
+ if (err < 0)
+ return err;
+
+ if (d < depth) {
+ seqbuf_seek(seqbuf, -4);
+ /* go up a level */
+ return 0;
+ } else if (d != depth) {
+ /* malformed data received from BPMP */
+ return -EIO;
+ }
+
+ err = seqbuf_read_u32(seqbuf, &t);
+ if (err < 0)
+ return err;
+ err = seqbuf_read_str(seqbuf, &name);
+ if (err < 0)
+ return err;
+
+ if (t & DEBUGFS_S_ISDIR) {
+ dentry = debugfs_create_dir(name, parent);
+ if (IS_ERR(dentry))
+ return -ENOMEM;
+ err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
+ if (err < 0)
+ return err;
+ } else {
+ umode_t mode;
+
+ mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
+ mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
+ dentry = debugfs_create_file(name, mode,
+ parent, bpmp,
+ &debugfs_fops);
+ if (IS_ERR(dentry))
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
+{
+ struct seqbuf seqbuf;
+ const size_t sz = SZ_512K;
+ dma_addr_t phys;
+ size_t nbytes;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
+ if (err < 0) {
+ goto free;
+ } else if (nbytes > sz) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ seqbuf_init(&seqbuf, virt, nbytes);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+
+ return err;
+}
+
+int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
+{
+ struct dentry *root;
+ bool inband;
+ int err;
+
+ inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
+
+ if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ return 0;
+
+ root = debugfs_create_dir("bpmp", NULL);
+ if (IS_ERR(root))
+ return -ENOMEM;
+
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (IS_ERR(bpmp->debugfs_mirror)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (inband)
+ err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
+ "/");
+ else
+ err = bpmp_populate_debugfs_shmem(bpmp);
+
+out:
+ if (err < 0)
+ debugfs_remove_recursive(root);
+
+ return err;
+}
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 000000000..182bfe396
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+ int (*init)(struct tegra_bpmp *bpmp);
+ void (*deinit)(struct tegra_bpmp *bpmp);
+ bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+ int (*ack_response)(struct tegra_bpmp_channel *channel);
+ int (*ack_request)(struct tegra_bpmp_channel *channel);
+ bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+ int (*post_response)(struct tegra_bpmp_channel *channel);
+ int (*post_request)(struct tegra_bpmp_channel *channel);
+ int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+ int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 000000000..63ab21d89
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
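+/*
+ * Tegra186-specific transport state: TX and RX IVC areas carved out of the
+ * "shmem" gen_pools and an HSP doorbell accessed through the mailbox
+ * framework.
+ */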
+struct tegra186_bpmp {
+ struct tegra_bpmp *parent;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ struct tegra186_bpmp *priv;
+
+ priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+ return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ err = mbox_send_message(priv->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(priv->mbox.channel, 0);
+
+ return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ if (WARN_ON(priv->mbox.channel == NULL))
+ return;
+
+ tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ priv->rx.virt + offset, priv->rx.phys + offset,
+ priv->tx.virt + offset, priv->tx.phys + offset,
+ 1, message_size, tegra186_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+ tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+ priv->parent = bpmp;
+
+ priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+ if (!priv->tx.pool) {
+ dev_err(bpmp->dev, "TX shmem pool not found\n");
+ return -EPROBE_DEFER;
+ }
+
+ priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+ if (!priv->tx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+ if (!priv->rx.pool) {
+ dev_err(bpmp->dev, "RX shmem pool not found\n");
+ err = -EPROBE_DEFER;
+ goto free_tx;
+ }
+
+ priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+ if (!priv->rx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ goto free_rx;
+
+ err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ goto cleanup_tx_channel;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ priv->mbox.client.dev = bpmp->dev;
+ priv->mbox.client.rx_callback = mbox_handle_rx;
+ priv->mbox.client.tx_block = false;
+ priv->mbox.client.knows_txdone = false;
+
+ priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+ if (IS_ERR(priv->mbox.channel)) {
+ err = PTR_ERR(priv->mbox.channel);
+ dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+
+cleanup_channels:
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+ return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ unsigned int i;
+
+ mbox_free_channel(priv->mbox.channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+ .init = tegra186_bpmp_init,
+ .deinit = tegra186_bpmp_deinit,
+ .is_response_ready = tegra186_bpmp_is_message_ready,
+ .is_request_ready = tegra186_bpmp_is_message_ready,
+ .ack_response = tegra186_bpmp_ack_message,
+ .ack_request = tegra186_bpmp_ack_message,
+ .is_response_channel_free = tegra186_bpmp_is_channel_free,
+ .is_request_channel_free = tegra186_bpmp_is_channel_free,
+ .post_response = tegra186_bpmp_post_message,
+ .post_request = tegra186_bpmp_post_message,
+ .ring_doorbell = tegra186_bpmp_ring_doorbell,
+ .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 000000000..c32754055
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET 0x000
+#define RESULT_OFFSET(id) (0xc00 + id * 4)
+#define TRIGGER_ID_SHIFT 16
+#define TRIGGER_CMD_GET 4
+
+#define STA_OFFSET 0
+#define SET_OFFSET 4
+#define CLR_OFFSET 8
+
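+/*
+ * Each channel owns a two-bit field in the arbitration semaphore registers
+ * (status/set/clear above); the SL_/MA_ values below are the possible
+ * channel states tested by the is_*_ready/free callbacks.
+ */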
+#define CH_MASK(ch) (0x3 << ((ch) * 2))
+#define SL_SIGL(ch) (0x0 << ((ch) * 2))
+#define SL_QUED(ch) (0x1 << ((ch) * 2))
+#define MA_FREE(ch) (0x2 << ((ch) * 2))
+#define MA_ACKD(ch) (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+ void __iomem *atomics;
+ void __iomem *arb_sema;
+ struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+
+ return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+ priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ struct irq_data *irq_data = priv->tx_irq_data;
+
+ /*
+ * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+ * available messages.
+ */
+ if (irq_data->chip->irq_retrigger)
+ return irq_data->chip->irq_retrigger(irq_data);
+
+ return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+
+ tegra_bpmp_handle_rx(bpmp);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ u32 address;
+ void *p;
+
+ /* Retrieve channel base address from BPMP */
+ writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+ priv->atomics + TRIGGER_OFFSET);
+ address = readl(priv->atomics + RESULT_OFFSET(index));
+
+ p = devm_ioremap(bpmp->dev, address, 0x80);
+ if (!p)
+ return -ENOMEM;
+
+ channel->ib = p;
+ channel->ob = p;
+ channel->index = index;
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct platform_device *pdev = to_platform_device(bpmp->dev);
+ struct tegra210_bpmp *priv;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->atomics))
+ return PTR_ERR(priv->atomics);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->arb_sema))
+ return PTR_ERR(priv->arb_sema);
+
+ err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "tx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+ return err;
+ }
+
+ priv->tx_irq_data = irq_get_irq_data(err);
+ if (!priv->tx_irq_data) {
+ dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+ return -ENOENT;
+ }
+
+ err = platform_get_irq_byname(pdev, "rx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get RX IRQ: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, err, rx_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+ .init = tegra210_bpmp_init,
+ .is_response_ready = tegra210_bpmp_is_response_ready,
+ .is_request_ready = tegra210_bpmp_is_request_ready,
+ .ack_response = tegra210_bpmp_ack_response,
+ .ack_request = tegra210_bpmp_ack_request,
+ .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+ .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+ .post_response = tegra210_bpmp_post_response,
+ .post_request = tegra210_bpmp_post_request,
+ .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 000000000..5654c5e98
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/semaphore.h>
+#include <linux/sched/clock.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+#define MSG_ACK BIT(0)
+#define MSG_RING BIT(1)
+#define TAG_SZ 32
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+
+ return bpmp->soc->ops;
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int count;
+ int index;
+
+ count = bpmp->soc->channels.thread.count;
+
+ index = channel - channel->bpmp->threaded_channels;
+ if (index < 0 || index >= count)
+ return -EINVAL;
+
+ return index;
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_ready(channel);
+}
+
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_ready(channel);
+}
+
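+/*
+ * Busy-wait for a response on the given channel, bounded by the cpu_tx
+ * timeout from the SoC data. Used by the atomic transfer path.
+ */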
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_is_response_ready(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->ack_response(channel);
+}
+
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->ack_request(channel);
+}
+
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_is_request_channel_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size, int *ret)
+{
+ int err;
+
+ if (data && size > 0)
+ memcpy(data, channel->ib->data, size);
+
+ err = tegra_bpmp_ack_response(channel);
+ if (err < 0)
+ return err;
+
+ *ret = channel->ib->code;
+
+ return 0;
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size, int *ret)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0) {
+ err = index;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size, ret);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+unlock:
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ channel->ob->code = mrq;
+ channel->ob->flags = flags;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ return tegra_bpmp_post_request(channel);
+}
+
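+/*
+ * Claim a free threaded channel (limited by the counting semaphore), mark it
+ * allocated and busy and post the request with MSG_ACK | MSG_RING so that
+ * the response is later signalled through the channel's completion from
+ * tegra_bpmp_handle_rx().
+ */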
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ channel = &bpmp->threaded_channels[index];
+
+ if (!tegra_bpmp_is_request_channel_free(channel)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0)
+ goto clear_allocated;
+
+ set_bit(index, bpmp->threaded.busy);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+
+clear_allocated:
+ clear_bit(index, bpmp->threaded.allocated);
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ up(&bpmp->threaded.lock);
+
+ return ERR_PTR(err);
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_request_channel_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
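+/*
+ * Atomic variant of tegra_bpmp_transfer(): must be called with interrupts
+ * disabled, uses the dedicated TX channel and busy-waits for the response.
+ */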
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = bpmp->tx_channel;
+
+ spin_lock(&bpmp->atomic_tx_lock);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0) {
+ spin_unlock(&bpmp->atomic_tx_lock);
+ return err;
+ }
+
+ spin_unlock(&bpmp->atomic_tx_lock);
+
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (err < 0)
+ return err;
+
+ err = tegra_bpmp_wait_response(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
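+/*
+ * Sleeping transfer: post the request on a threaded channel and wait, with a
+ * timeout, for the completion that fires once the response has arrived.
+ */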
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (err < 0)
+ return err;
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
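+/*
+ * Reply to an incoming MRQ: acknowledge the request and, if the sender asked
+ * for an ACK, post the response code and data, ringing the doorbell when
+ * MSG_RING is set.
+ */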
+void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
+ const void *data, size_t size)
+{
+ unsigned long flags = channel->ib->flags;
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_bpmp_ack_request(channel);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
+ return;
+
+ channel->ob->code = code;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ err = tegra_bpmp_post_response(channel);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = tegra_bpmp_ring_doorbell(bpmp);
+ if (WARN_ON(err < 0))
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
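+/*
+ * Query the firmware via MRQ_QUERY_ABI whether a given MRQ is implemented;
+ * any transfer error is treated as "not supported".
+ */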
+bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+ struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
+ struct mrq_query_abi_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_ABI,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err || msg.rx.ret)
+ return false;
+
+ return resp.status == 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request *request;
+ struct mrq_ping_response response;
+
+ request = (struct mrq_ping_request *)channel->ib->data;
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request->challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+/* deprecated version of tag query */
+static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ if (size != TAG_SZ)
+ return -EINVAL;
+
+ virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ memcpy(tag, virt, TAG_SZ);
+
+ dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
+ struct mrq_query_fw_tag_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_FW_TAG,
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ if (size != sizeof(resp.tag))
+ return -EINVAL;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+
+ if (err)
+ return err;
+ if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ memcpy(tag, resp.tag, sizeof(resp.tag));
+ return 0;
+ }
+
+ return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = channel->ob->flags;
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
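+/*
+ * Called from the transport's RX path (mailbox callback or interrupt):
+ * dispatch any incoming request on the RX channel to its MRQ handler and
+ * complete all busy threaded channels whose responses have become ready.
+ */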
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = bpmp->rx_channel;
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_is_request_ready(channel))
+ tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+ spin_lock(&bpmp->lock);
+
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = &bpmp->threaded_channels[i];
+
+ if (tegra_bpmp_is_response_ready(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp *bpmp;
+ char tag[TAG_SZ];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated)
+ return -ENOMEM;
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy)
+ return -ENOMEM;
+
+ spin_lock_init(&bpmp->atomic_tx_lock);
+ bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
+ GFP_KERNEL);
+ if (!bpmp->tx_channel)
+ return -ENOMEM;
+
+ bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
+ GFP_KERNEL);
+ if (!bpmp->rx_channel)
+ return -ENOMEM;
+
+ bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
+ sizeof(*bpmp->threaded_channels),
+ GFP_KERNEL);
+ if (!bpmp->threaded_channels)
+ return -ENOMEM;
+
+ err = bpmp->soc->ops->init(bpmp);
+ if (err < 0)
+ return err;
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto deinit;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
+
+ platform_set_drvdata(pdev, bpmp);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+ err = tegra_bpmp_init_powergates(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_init_debugfs(bpmp);
+ if (err < 0)
+ dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+deinit:
+ if (bpmp->soc->ops->deinit)
+ bpmp->soc->ops->deinit(bpmp);
+
+ return err;
+}
+
+static int __maybe_unused tegra_bpmp_resume(struct device *dev)
+{
+ struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
+
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_bpmp_pm_ops = {
+ .resume_noirq = tegra_bpmp_resume,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 3,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 0,
+ .count = 3,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra186_bpmp_ops,
+ .num_resets = 193,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 1,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 4,
+ .count = 1,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 8,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra210_bpmp_ops,
+};
+#endif
+
+static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ .pm = &tegra_bpmp_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_bpmp_probe,
+};
+builtin_platform_driver(tegra_bpmp_driver);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 000000000..e2398cd7c
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses the tx.channel->tx.state field to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+ * established state, indicating that it has cleared the counters in our
+ * rx_channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the w_count and r_count and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts: the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with READ_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ WRITE_ONCE(ivc->tx.channel->tx.count,
+ READ_ONCE(ivc->tx.channel->tx.count) + 1);
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ WRITE_ONCE(ivc->rx.channel->rx.count,
+ READ_ONCE(ivc->rx.channel->rx.count) + 1);
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, ivc->rx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, ivc->tx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, ivc->tx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header,
+ unsigned int frame)
+{
+ if (WARN_ON(frame >= ivc->num_frames))
+ return ERR_PTR(-EINVAL);
+
+ return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return ERR_PTR(-EINVAL);
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ state = READ_ONCE(ivc->rx.channel->tx.state);
+
+ if (state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+ state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores clearing
+ * tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the diagram in "IVC State Transition Table" above.
+ */
+ }
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
+
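+/*
+ * Validate queue placement and geometry: headers and frames must be
+ * TEGRA_IVC_ALIGN aligned, the total size must fit in 32 bits and the RX
+ * and TX regions must not overlap.
+ */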
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(peer, ivc->rx.phys))
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(peer, ivc->tx.phys)) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ ivc->rx.channel = rx;
+ ivc->tx.channel = tx;
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
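+
+/*
+ * Illustrative usage sketch, not part of the driver: a hypothetical client
+ * would typically size its queues with tegra_ivc_align() and
+ * tegra_ivc_total_queue_size() (the latter being the per-direction
+ * allocation size including the header), register the channel with
+ * tegra_ivc_init(), reset it with tegra_ivc_reset(), and then call
+ * tegra_ivc_notified() from its notification handler until it stops
+ * returning -EAGAIN, i.e. until the channel is ESTABLISHED. The names
+ * my_notify, my_data, rx/tx and their DMA addresses below are made up for
+ * illustration only:
+ *
+ *	size_t frame = tegra_ivc_align(64);
+ *	size_t queue = tegra_ivc_total_queue_size(16 * frame);
+ *	int err;
+ *
+ *	err = tegra_ivc_init(&ivc, dev, rx, rx_phys, tx, tx_phys,
+ *			     16, frame, my_notify, my_data);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	if (tegra_ivc_notified(&ivc) == -EAGAIN)
+ *		return;
+ *
+ *	tegra_ivc_cleanup(&ivc);
+ */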
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 000000000..b0576cec2
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,3513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Nishanth Menon
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message: Transmit message
+ * @rx_len: Receive message length
+ * @xfer_buf: Preallocated buffer to store receive message
+ * Since we work with request-ACK protocol, we can
+ * reuse the same buffer for the rx path as we
+ * use for the tx path.
+ * @done: completion event
+ */
+struct ti_sci_xfer {
+ struct ti_msgmgr_message tx_message;
+ u8 rx_len;
+ u8 *xfer_buf;
+ struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count: Counting Semaphore for managing max simultaneous
+ * Messages.
+ * @xfer_block: Preallocated Message array
+ * @xfer_alloc_table: Bitmap table for allocated messages.
+ * Index of this bitmap table is also used for message
+ * sequence identifier.
+ * @xfer_lock: Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+ struct semaphore sem_xfer_count;
+ struct ti_sci_xfer *xfer_block;
+ unsigned long *xfer_alloc_table;
+ /* protect transfer allocation */
+ spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @default_host_id: Host identifier representing the compute entity
+ * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ * simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+ u8 default_host_id;
+ int max_rx_timeout_ms;
+ int max_msgs;
+ int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev: Device pointer
+ * @desc: SoC description for this instance
+ * @nb: Reboot Notifier block
+ * @d: Debugfs file entry
+ * @debug_region: Memory region where the debug message are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle: Instance of TI SCI handle to send to clients.
+ * @cl: Mailbox Client
+ * @chan_tx: Transmit mailbox channel
+ * @chan_rx: Receive mailbox channel
+ * @minfo: Message info
+ * @node: list head
+ * @host_id: Host ID
+ * @users: Number of users of this instance
+ */
+struct ti_sci_info {
+ struct device *dev;
+ struct notifier_block nb;
+ const struct ti_sci_desc *desc;
+ struct dentry *d;
+ void __iomem *debug_region;
+ char *debug_buffer;
+ size_t debug_region_size;
+ struct ti_sci_handle handle;
+ struct mbox_client cl;
+ struct mbox_chan *chan_tx;
+ struct mbox_chan *chan_rx;
+ struct ti_sci_xfers_info minfo;
+ struct list_head node;
+ u8 host_id;
+ /* protected by ti_sci_list_mutex */
+ int users;
+
+};
+
+#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s: sequence file pointer
+ * @unused: unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+ struct ti_sci_info *info = s->private;
+
+ memcpy_fromio(info->debug_buffer, info->debug_region,
+ info->debug_region_size);
+ /*
+	 * We don't trust firmware to NUL-terminate the last byte (hence we
+	 * have allocated 1 extra 0 byte). Since we cannot guarantee any
+	 * specific data format for debug messages, we just present the data
+	 * in the buffer as-is - we expect the messages to be self-explanatory.
+ */
+ seq_puts(s, info->debug_buffer);
+ return 0;
+}
+
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev: platform device pointer
+ * @info: Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+ struct ti_sci_info *info)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ char debug_name[50];
+
+ /* Debug region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "debug_messages");
+ info->debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->debug_region))
+ return 0;
+ info->debug_region_size = resource_size(res);
+
+ info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+ sizeof(char), GFP_KERNEL);
+ if (!info->debug_buffer)
+ return -ENOMEM;
+	/* Set up NUL termination */
+ info->debug_buffer[info->debug_region_size] = 0;
+
+ snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
+ dev_name(dev));
+ info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+ &ti_sci_debug_fops);
+ if (IS_ERR(info->d))
+ return PTR_ERR(info->d);
+
+ dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+ info->debug_region, info->debug_region_size, res);
+ return 0;
+}
+
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+ return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+ struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev: Device pointer corresponding to the SCI entity
+ * @hdr: pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+ struct ti_sci_msg_hdr *hdr)
+{
+ dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+ hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+ struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+ struct device *dev = info->dev;
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_msgmgr_message *mbox_msg = m;
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+ struct ti_sci_xfer *xfer;
+ u8 xfer_id;
+
+ xfer_id = hdr->seq;
+
+ /*
+ * Are we even expecting this?
+ * NOTE: barriers were implicit in locks used for modifying the bitmap
+ */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ /* Is the message of valid length? */
+ if (mbox_msg->len > info->desc->max_msg_size) {
+ dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
+ mbox_msg->len, info->desc->max_msg_size);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+ if (mbox_msg->len < xfer->rx_len) {
+ dev_err(dev, "Recv xfer %zu < expected %d length\n",
+ mbox_msg->len, xfer->rx_len);
+ ti_sci_dump_header_dbg(dev, hdr);
+ return;
+ }
+
+ ti_sci_dump_header_dbg(dev, hdr);
+ /* Take a copy to the rx buffer.. */
+ memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+ complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info: Pointer to SCI entity information
+ * @msg_type: Message type
+ * @msg_flags: Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid xfer pointer if all went fine, else corresponding error
+ *	   pointer.
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+ u16 msg_type, u32 msg_flags,
+ size_t tx_message_size,
+ size_t rx_message_size)
+{
+ struct ti_sci_xfers_info *minfo = &info->minfo;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_hdr *hdr;
+ unsigned long flags;
+ unsigned long bit_pos;
+ u8 xfer_id;
+ int ret;
+ int timeout;
+
+ /* Ensure we have sane transfer sizes */
+ if (rx_message_size > info->desc->max_msg_size ||
+ tx_message_size > info->desc->max_msg_size ||
+ rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+ return ERR_PTR(-ERANGE);
+
+ /*
+	 * Ensure we have only a controlled number of pending messages.
+	 * Ideally, we might just have to wait for a single message, but be
+	 * conservative and wait 5 times that.
+ */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+ ret = down_timeout(&minfo->sem_xfer_count, timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Keep the locked section as small as possible */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+ info->desc->max_msgs);
+ set_bit(bit_pos, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /*
+	 * We already ensured in probe that the maximum number of messages
+	 * fits in hdr.seq - NOTE: this gives predictable O(1) access
+	 * latencies, but it opens us up to risk if the remote misbehaves
+	 * with corrupted message sequence responses. If that happens, we
+	 * are going to be messed up anyway.
+ */
+ xfer_id = (u8)bit_pos;
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer->tx_message.len = tx_message_size;
+ xfer->rx_len = (u8)rx_message_size;
+
+ reinit_completion(&xfer->done);
+
+ hdr->seq = xfer_id;
+ hdr->type = msg_type;
+ hdr->host = info->host_id;
+ hdr->flags = msg_flags;
+
+ return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo: transfer info pointer
+ * @xfer: message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+ struct ti_sci_xfer *xfer)
+{
+ unsigned long flags;
+ struct ti_sci_msg_hdr *hdr;
+ u8 xfer_id;
+
+ hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ xfer_id = hdr->seq;
+
+ /*
+ * Keep the locked section as small as possible
+ * NOTE: we might escape with smp_mb and no lock here..
+ * but just be conservative and symmetric.
+ */
+ spin_lock_irqsave(&minfo->xfer_lock, flags);
+ clear_bit(xfer_id, minfo->xfer_alloc_table);
+ spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+ /* Increment the count for the next user to get through */
+ up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info: Pointer to SCI entity information
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no response, the corresponding error on a
+ *	   transmit error, else 0 if all goes well.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+ struct ti_sci_xfer *xfer)
+{
+ int ret;
+ int timeout;
+ struct device *dev = info->dev;
+
+ ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ /* And we wait for the response. */
+ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+ if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+ dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
+ (void *)_RET_IP_);
+ ret = -ETIMEDOUT;
+ }
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(info->chan_tx, ret);
+
+ return ret;
+}
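+
+/*
+ * The command implementations below are all built on the same pattern from
+ * the helpers above (and ti_sci_is_response_ack() below). Roughly, with
+ * error handling trimmed and struct ti_sci_msg_req_xyz standing in for the
+ * request type of the specific command:
+ *
+ *	xfer = ti_sci_get_one_xfer(info, msg_type,
+ *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ *				   sizeof(*req), sizeof(*resp));
+ *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf;
+ *	... fill in request fields ...
+ *	ret = ti_sci_do_xfer(info, xfer);
+ *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+ *	ti_sci_put_one_xfer(&info->minfo, xfer);
+ */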
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info: Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+ struct device *dev = info->dev;
+ struct ti_sci_handle *handle = &info->handle;
+ struct ti_sci_version_info *ver = &handle->version;
+ struct ti_sci_msg_resp_version *rev_info;
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ ver->abi_major = rev_info->abi_major;
+ ver->abi_minor = rev_info->abi_minor;
+ ver->firmware_revision = rev_info->firmware_revision;
+ strncpy(ver->firmware_description, rev_info->firmware_description,
+ sizeof(ver->firmware_description));
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ return ret;
+}
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r: pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+ struct ti_sci_msg_hdr *hdr = r;
+
+ return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @flags: flags to setup for the device
+ * @state: State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle: Handle to the device
+ * @id: Device Identifier
+ * @clcnt: Pointer to Context Loss Count
+ * @resets: pointer to resets
+ * @p_state: pointer to p_state
+ * @c_state: pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ u32 id, u32 *clcnt, u32 *resets,
+ u8 *p_state, u8 *c_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_device_state *req;
+ struct ti_sci_msg_resp_get_device_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!clcnt && !resets && !p_state && !c_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+ req->id = id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (clcnt)
+ *clcnt = resp->context_loss_count;
+ if (resets)
+ *resets = resp->resets;
+ if (p_state)
+ *p_state = resp->programmed_state;
+ if (c_state)
+ *c_state = resp->current_state;
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ * that can be shared with other hosts.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
+ * TISCI that is exclusively owned by the
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id, 0,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
+ * TISCI that is exclusively owned by
+ * requesting host.
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
+ u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ MSG_FLAG_DEVICE_EXCLUSIVE,
+ MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Release the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+ return ti_sci_set_device_state(handle, id,
+ 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+ u8 unused;
+
+ /* check the device state which will also tell us if the ID is valid */
+ return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @count: Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+ u32 *count)
+{
+ return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state)
+{
+ int ret;
+ u8 state;
+
+ if (!r_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+ if (ret)
+ return ret;
+
+ *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be stopped
+ * @curr_state: true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @r_state: true if requested to be ON
+ * @curr_state: true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+ bool *r_state, bool *curr_state)
+{
+ int ret;
+ u8 p_state, c_state;
+
+ if (!r_state && !curr_state)
+ return -EINVAL;
+
+ ret =
+ ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (r_state)
+ *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+ if (curr_state)
+ *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @curr_state: true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+ bool *curr_state)
+{
+ int ret;
+ u8 state;
+
+ if (!curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+ if (ret)
+ return ret;
+
+ *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id: Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 reset_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_device_resets *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+ req->id = id;
+ req->resets = reset_state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ * by TISCI
+ * @handle: Pointer to TISCI handle
+ * @id: Device Identifier
+ * @reset_state: Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+ u32 id, u32 *reset_state)
+{
+ return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+ NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @flags: Header flags as needed
+ * @state: State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u32 flags, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_state *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+ flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
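+	/*
+	 * The legacy clk_id field is only 8 bits wide: identifiers of 255
+	 * and above go into the 32-bit clk_id_32 field instead, with clk_id
+	 * set to 255 to flag that the extended field is used. The same
+	 * encoding recurs in the other clock commands below.
+	 */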
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->request_state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @programmed_state: State requested for clock to move to
+ * @current_state: State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u8 *programmed_state, u8 *current_state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_state *req;
+ struct ti_sci_msg_resp_get_clock_state *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ if (!programmed_state && !current_state)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (programmed_state)
+ *programmed_state = resp->programmed_state;
+ if (current_state)
+ *current_state = resp->current_state;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool needs_ssc,
+ bool can_change_freq, bool enable_input_term)
+{
+ u32 flags = 0;
+
+ flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+ flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+ flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+ return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+ MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
+ MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id)
+{
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
+ MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, bool *req_state)
+{
+ u8 state = 0;
+ int ret;
+
+ if (!req_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+ if (ret)
+ return ret;
+
+ *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+ u32 clk_id, bool *req_state, bool *curr_state)
+{
+ u8 c_state = 0, r_state = 0;
+ int ret;
+
+ if (!req_state && !curr_state)
+ return -EINVAL;
+
+ ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+ &r_state, &c_state);
+ if (ret)
+ return ret;
+
+ if (req_state)
+ *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+ if (curr_state)
+ *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+ return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u32 parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_parent *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ if (parent_id < 255) {
+ req->parent_id = parent_id;
+ } else {
+ req->parent_id = 255;
+ req->parent_id_32 = parent_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @parent_id: Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u32 *parent_id)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_parent *req;
+ struct ti_sci_msg_resp_get_clock_parent *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !parent_id)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (resp->parent_id < 255)
+ *parent_id = resp->parent_id;
+ else
+ *parent_id = resp->parent_id_32;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id,
+ u32 *num_parents)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_num_parents *req;
+ struct ti_sci_msg_resp_get_clock_num_parents *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !num_parents)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (resp->num_parents < 255)
+ *num_parents = resp->num_parents;
+ else
+ *num_parents = resp->num_parents_32;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @match_freq: Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq,
+ u64 *match_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_query_clock_freq *req;
+ struct ti_sci_msg_resp_query_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !match_freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *match_freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * @max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 min_freq,
+ u64 target_freq, u64 max_freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_clock_freq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+ req->min_freq_hz = min_freq;
+ req->target_freq_hz = target_freq;
+ req->max_freq_hz = max_freq;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle: pointer to TI SCI handle
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * @freq:	Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+ u32 dev_id, u32 clk_id, u64 *freq)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_get_clock_freq *req;
+ struct ti_sci_msg_resp_get_clock_freq *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle || !freq)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+ req->dev_id = dev_id;
+ if (clk_id < 255) {
+ req->clk_id = clk_id;
+ } else {
+ req->clk_id = 255;
+ req->clk_id_32 = clk_id;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ *freq = resp->freq_hz;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_reboot *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+ else
+ ret = 0;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ * to a host. Resource is uniquely identified by
+ * type and subtype.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ struct ti_sci_msg_resp_get_resource_range *resp;
+ struct ti_sci_msg_req_get_resource_range *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+ req->secondary_host = s_host;
+ req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
+ req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
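+	/*
+	 * Treat a NACK, or an all-zero range (nothing assigned to this
+	 * host for the given type/subtype), as an error.
+	 */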
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else if (!resp->range_start && !resp->range_num) {
+ ret = -ENODEV;
+ } else {
+ *range_start = resp->range_start;
+ *range_num = resp->range_num;
+	}
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
+ *				   host that is the same as the TI SCI
+ *				   interface host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype,
+ TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+ range_start, range_num);
+}
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ * assigned to a specified host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
+ range_start, range_num);
+}
+
+/**
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
+ * the requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ * @type:		Request type, irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 src_id, u16 src_index,
+ u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host,
+ u16 type)
+{
+ struct ti_sci_msg_req_manage_irq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->src_id = src_id;
+ req->src_index = src_index;
+ req->dst_id = dst_id;
+ req->dst_host_irq = dst_host_irq;
+ req->ia_id = ia_id;
+ req->vint = vint;
+ req->global_event = global_event;
+ req->vint_status_bit = vint_status_bit;
+ req->secondary_host = s_host;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper api to free the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ * source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ * requested source and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+ MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the
+ * requested source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq:	IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ * and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID |
+ MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_ring_config() - configure RA ring
+ * @handle: Pointer to TI SCI handle.
+ * @valid_params: Bitfield defining validity of ring configuration
+ * parameters
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
+ * allocated
+ * @index: Ring index
+ * @addr_lo: The ring base address lo 32 bits
+ * @addr_hi: The ring base address hi 32 bits
+ * @count: Number of ring elements
+ * @mode: The mode of the ring
+ * @size: The ring element size.
+ * @order_id: Specifies the ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 nav_id, u16 index,
+ u32 addr_lo, u32 addr_hi, u32 count,
+ u8 mode, u8 size, u8 order_id)
+{
+ struct ti_sci_msg_rm_ring_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->nav_id = nav_id;
+ req->index = index;
+ req->addr_lo = addr_lo;
+ req->addr_hi = addr_hi;
+ req->count = count;
+ req->mode = mode;
+ req->size = size;
+ req->order_id = order_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_ring_get_config() - get RA ring configuration
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
+ * allocated
+ * @index: Ring index
+ * @addr_lo: Returns ring's base address lo 32 bits
+ * @addr_hi: Returns ring's base address hi 32 bits
+ * @count: Returns number of ring elements
+ * @mode: Returns mode of the ring
+ * @size: Returns ring element size
+ * @order_id: Returns ring's bus order ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
+ */
+static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 index, u8 *mode,
+ u32 *addr_lo, u32 *addr_hi,
+ u32 *count, u8 *size, u8 *order_id)
+{
+ struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
+ struct ti_sci_msg_rm_ring_get_cfg_req *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev,
+ "RM_RA:Message get config failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->index = index;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ if (mode)
+ *mode = resp->mode;
+ if (addr_lo)
+ *addr_lo = resp->addr_lo;
+ if (addr_hi)
+ *addr_hi = resp->addr_hi;
+ if (count)
+ *count = resp->count;
+ if (size)
+ *size = resp->size;
+ if (order_id)
+ *order_id = resp->order_id;
+	}
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * pairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_pair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
+ * @handle: Pointer to TI SCI handle.
+ * @nav_id: Device ID of Navigator Subsystem which should be used for
+ * unpairing
+ * @src_thread: Source PSI-L thread ID
+ * @dst_thread: Destination PSI-L thread ID
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 src_thread, u32 dst_thread)
+{
+ struct ti_sci_msg_psil_unpair *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
+ req->nav_id = nav_id;
+ req->src_thread = src_thread;
+ req->dst_thread = dst_thread;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->tx_pause_on_err = params->tx_pause_on_err;
+ req->tx_filt_einfo = params->tx_filt_einfo;
+ req->tx_filt_pswords = params->tx_filt_pswords;
+ req->tx_atype = params->tx_atype;
+ req->tx_chan_type = params->tx_chan_type;
+ req->tx_supr_tdpkt = params->tx_supr_tdpkt;
+ req->tx_fetch_size = params->tx_fetch_size;
+ req->tx_credit_count = params->tx_credit_count;
+ req->txcq_qnum = params->txcq_qnum;
+ req->tx_priority = params->tx_priority;
+ req->tx_qos = params->tx_qos;
+ req->tx_orderid = params->tx_orderid;
+ req->fdepth = params->fdepth;
+ req->tx_sched_priority = params->tx_sched_priority;
+ req->tx_burst_size = params->tx_burst_size;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->rx_fetch_size = params->rx_fetch_size;
+ req->rxcq_qnum = params->rxcq_qnum;
+ req->rx_priority = params->rx_priority;
+ req->rx_qos = params->rx_qos;
+ req->rx_orderid = params->rx_orderid;
+ req->rx_sched_priority = params->rx_sched_priority;
+ req->flowid_start = params->flowid_start;
+ req->flowid_cnt = params->flowid_cnt;
+ req->rx_pause_on_err = params->rx_pause_on_err;
+ req->rx_atype = params->rx_atype;
+ req->rx_chan_type = params->rx_chan_type;
+ req->rx_ignore_short = params->rx_ignore_short;
+ req->rx_ignore_long = params->rx_ignore_long;
+ req->rx_burst_size = params->rx_burst_size;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
+ * structure
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ *
+ * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
+ * more info.
+ */
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params)
+{
+ struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(handle))
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->flow_index = params->flow_index;
+ req->rx_einfo_present = params->rx_einfo_present;
+ req->rx_psinfo_present = params->rx_psinfo_present;
+ req->rx_error_handling = params->rx_error_handling;
+ req->rx_desc_type = params->rx_desc_type;
+ req->rx_sop_offset = params->rx_sop_offset;
+ req->rx_dest_qnum = params->rx_dest_qnum;
+ req->rx_src_tag_hi = params->rx_src_tag_hi;
+ req->rx_src_tag_lo = params->rx_src_tag_lo;
+ req->rx_dest_tag_hi = params->rx_dest_tag_hi;
+ req->rx_dest_tag_lo = params->rx_dest_tag_lo;
+ req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
+ req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
+ req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
+ req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
+ req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
+ req->rx_fdq1_qnum = params->rx_fdq1_qnum;
+ req->rx_fdq2_qnum = params->rx_fdq2_qnum;
+ req->rx_fdq3_qnum = params->rx_fdq3_qnum;
+ req->rx_ps_location = params->rx_ps_location;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+ dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_request() - Command to request control of a physical processor
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_request *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_release() - Command to release control of a physical processor
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
+ u8 proc_id)
+{
+ struct ti_sci_msg_req_proc_release *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
+ *				processor to a host in the processor's access
+ *				control list.
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @host_id: Host ID to get the control of the processor
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
+ u8 proc_id, u8 host_id)
+{
+ struct ti_sci_msg_req_proc_handover *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->host_id = host_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_config() - Command to set the processor boot
+ * configuration flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id:		Processor ID this request is for
+ * @bootvector:		Boot vector address to be programmed for the processor
+ * @config_flags_set:	Configuration flags to be set
+ * @config_flags_clear: Configuration flags to be cleared.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 bootvector,
+ u32 config_flags_set,
+ u32 config_flags_clear)
+{
+ struct ti_sci_msg_req_set_config *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
+ req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
+ TI_SCI_ADDR_HIGH_SHIFT;
+ req->config_flags_set = config_flags_set;
+ req->config_flags_clear = config_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_set_control() - Command to set the processor boot
+ * control flags
+ * @handle: Pointer to TI SCI handle
+ * @proc_id: Processor ID this request is for
+ * @control_flags_set: Control flags to be set
+ * @control_flags_clear: Control flags to be cleared
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
+ u8 proc_id, u32 control_flags_set,
+ u32 control_flags_clear)
+{
+ struct ti_sci_msg_req_set_ctrl *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+ req->control_flags_set = control_flags_set;
+ req->control_flags_clear = control_flags_clear;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
+ * @handle:	Pointer to TI SCI handle
+ * @proc_id:	Processor ID this request is for
+ * @bv:		Returns the processor boot vector
+ * @cfg_flags:	Returns the processor configuration flags
+ * @ctrl_flags:	Returns the processor control flags
+ * @sts_flags:	Returns the processor status flags
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
+ u8 proc_id, u64 *bv, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *sts_flags)
+{
+ struct ti_sci_msg_resp_get_status *resp;
+ struct ti_sci_msg_req_get_status *req;
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (!handle)
+ return -EINVAL;
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
+ req->processor_id = proc_id;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else {
+ *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
+ (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
+ TI_SCI_ADDR_HIGH_MASK);
+ *cfg_flags = resp->config_flags;
+ *ctrl_flags = resp->control_flags;
+ *sts_flags = resp->status_flags;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/*
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info:	pointer to TISCI instance info
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+ struct ti_sci_ops *ops = &info->handle.ops;
+ struct ti_sci_core_ops *core_ops = &ops->core_ops;
+ struct ti_sci_dev_ops *dops = &ops->dev_ops;
+ struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+ struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
+ struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
+ struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
+ struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
+ struct ti_sci_proc_ops *pops = &ops->proc_ops;
+
+ core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+ dops->get_device = ti_sci_cmd_get_device;
+ dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
+ dops->idle_device = ti_sci_cmd_idle_device;
+ dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
+ dops->put_device = ti_sci_cmd_put_device;
+
+ dops->is_valid = ti_sci_cmd_dev_is_valid;
+ dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+ dops->is_idle = ti_sci_cmd_dev_is_idle;
+ dops->is_stop = ti_sci_cmd_dev_is_stop;
+ dops->is_on = ti_sci_cmd_dev_is_on;
+ dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+ dops->set_device_resets = ti_sci_cmd_set_device_resets;
+ dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+ cops->get_clock = ti_sci_cmd_get_clock;
+ cops->idle_clock = ti_sci_cmd_idle_clock;
+ cops->put_clock = ti_sci_cmd_put_clock;
+ cops->is_auto = ti_sci_cmd_clk_is_auto;
+ cops->is_on = ti_sci_cmd_clk_is_on;
+ cops->is_off = ti_sci_cmd_clk_is_off;
+
+ cops->set_parent = ti_sci_cmd_clk_set_parent;
+ cops->get_parent = ti_sci_cmd_clk_get_parent;
+ cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+ cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+ cops->set_freq = ti_sci_cmd_clk_set_freq;
+ cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+ rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+ rm_core_ops->get_range_from_shost =
+ ti_sci_cmd_get_resource_range_from_shost;
+
+ iops->set_irq = ti_sci_cmd_set_irq;
+ iops->set_event_map = ti_sci_cmd_set_event_map;
+ iops->free_irq = ti_sci_cmd_free_irq;
+ iops->free_event_map = ti_sci_cmd_free_event_map;
+
+ rops->config = ti_sci_cmd_ring_config;
+ rops->get_config = ti_sci_cmd_ring_get_config;
+
+ psilops->pair = ti_sci_cmd_rm_psil_pair;
+ psilops->unpair = ti_sci_cmd_rm_psil_unpair;
+
+ udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
+ udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
+ udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
+
+ pops->request = ti_sci_cmd_proc_request;
+ pops->release = ti_sci_cmd_proc_release;
+ pops->handover = ti_sci_cmd_proc_handover;
+ pops->set_config = ti_sci_cmd_proc_set_config;
+ pops->set_control = ti_sci_cmd_proc_set_control;
+ pops->get_status = ti_sci_cmd_proc_get_status;
+}
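+
+/*
+ * Example (illustrative sketch only, not part of this driver): once a client
+ * has obtained a ti_sci_handle, the commands wired up above are reached
+ * through the ops tables, e.g.:
+ *
+ *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
+ *	ret = handle->ops.clk_ops.get_freq(handle, dev_id, clk_id, &freq);
+ *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src_thread, dst_thread);
+ *
+ * dev_id, clk_id, freq, nav_id and the thread IDs are placeholders that the
+ * client must supply.
+ */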
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev: Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ * Every successful ti_sci_get_handle() must be balanced with ti_sci_put_handle().
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+ struct device_node *ti_sci_np;
+ struct list_head *p;
+ struct ti_sci_handle *handle = NULL;
+ struct ti_sci_info *info;
+
+ if (!dev) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+ ti_sci_np = of_get_parent(dev->of_node);
+ if (!ti_sci_np) {
+ dev_err(dev, "No OF information\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle: Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ * Every successful ti_sci_get_handle() must be balanced with ti_sci_put_handle().
+ *
+ * Return: 0 if successfully released,
+ * if an error pointer was passed, it returns the error value back,
+ * if NULL was passed, it returns -EINVAL.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+ struct ti_sci_info *info;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ mutex_lock(&ti_sci_list_mutex);
+ if (!WARN_ON(!info->users))
+ info->users--;
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
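+
+/*
+ * Usage sketch (illustrative only; assumes a client device that is a child of
+ * the TISCI node):
+ *
+ *	const struct ti_sci_handle *handle = ti_sci_get_handle(dev);
+ *
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	...
+ *	ti_sci_put_handle(handle);
+ */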
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+ const struct ti_sci_handle **ptr = res;
+ const struct ti_sci_handle *handle = *ptr;
+ int ret;
+
+ ret = ti_sci_put_handle(handle);
+ if (ret)
+ dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev:	Device for which we want the SCI handle.
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+ const struct ti_sci_handle **ptr;
+ const struct ti_sci_handle *handle;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_handle(dev);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
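+
+/*
+ * Usage sketch (illustrative only): the managed variant ties the handle
+ * lifetime to the client device, so no explicit ti_sci_put_handle() is needed:
+ *
+ *	const struct ti_sci_handle *handle = devm_ti_sci_get_handle(dev);
+ *
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */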
+
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np: device node
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ * Every successful ti_sci_get_by_phandle() must be balanced with ti_sci_put_handle().
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct ti_sci_handle *handle = NULL;
+ struct device_node *ti_sci_np;
+ struct ti_sci_info *info;
+ struct list_head *p;
+
+ if (!np) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ti_sci_np = of_parse_phandle(np, property, 0);
+ if (!ti_sci_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev: Device pointer requesting TISCI handle
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct ti_sci_handle *handle;
+ const struct ti_sci_handle **ptr;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
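+
+/*
+ * Usage sketch (illustrative only; "ti,sci" is an example phandle property
+ * name taken from client bindings, not mandated by this file):
+ *
+ *	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */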
+
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+ unsigned long flags;
+ u16 set, free_bit;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ free_bit = find_first_zero_bit(res->desc[set].res_map,
+ res->desc[set].num);
+ if (free_bit != res->desc[set].num) {
+ set_bit(free_bit, res->desc[set].res_map);
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+ return res->desc[set].start + free_bit;
+ }
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+
+ return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ * @id: Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+ unsigned long flags;
+ u16 set;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ if (res->desc[set].start <= id &&
+ (res->desc[set].num + res->desc[set].start) > id)
+ clear_bit(id - res->desc[set].start,
+ res->desc[set].res_map);
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
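+
+/*
+ * Usage sketch (illustrative only): allocate a free resource index from a
+ * ti_sci_resource and release it again when no longer needed:
+ *
+ *	u16 idx = ti_sci_get_free_resource(res);
+ *
+ *	if (idx == TI_SCI_RESOURCE_NULL)
+ *		return -ENODEV;
+ *	...
+ *	ti_sci_release_resource(res, idx);
+ */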
+
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+ u32 set, count = 0;
+
+ for (set = 0; set < res->sets; set++)
+ count += res->desc[set].num;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_resource_sets() - Get TISCI resource sets assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_types:	Array of resource sub-types assigned to the device
+ * @sets: Number of sub_types
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+static struct ti_sci_resource *
+devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, u32 *sub_types,
+ u32 sets)
+{
+ struct ti_sci_resource *res;
+ bool valid_set = false;
+ int i, ret;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ res->sets = sets;
+ res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+ GFP_KERNEL);
+ if (!res->desc)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < res->sets; i++) {
+ ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+ sub_types[i],
+ &res->desc[i].start,
+ &res->desc[i].num);
+ if (ret) {
+ dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
+ dev_id, sub_types[i]);
+ res->desc[i].start = 0;
+ res->desc[i].num = 0;
+ continue;
+ }
+
+ dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_id, sub_types[i], res->desc[i].start,
+ res->desc[i].num);
+
+ valid_set = true;
+		res->desc[i].res_map =
+			devm_kcalloc(dev, BITS_TO_LONGS(res->desc[i].num),
+				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
+ if (!res->desc[i].res_map)
+ return ERR_PTR(-ENOMEM);
+ }
+ raw_spin_lock_init(&res->lock);
+
+ if (valid_set)
+ return res;
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop:	property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 *sub_types;
+ int sets;
+
+ sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(sets);
+ }
+
+ sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
+ if (!sub_types)
+ return ERR_PTR(-ENOMEM);
+
+ of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
+ res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
+ sets);
+
+ kfree(sub_types);
+ return res;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
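+
+/*
+ * Usage sketch (illustrative only; the device ID and property name are
+ * examples supplied by the client's DT binding, not defined here):
+ *
+ *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
+ *					  "ti,sci-rm-range-vint");
+ *	if (IS_ERR(res))
+ *		return PTR_ERR(res);
+ */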
+
+/**
+ * devm_ti_sci_get_resource() - Get a resource range assigned to the device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @sub_type:	TISCI resource subtype representing the resource.
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
+ u32 dev_id, u32 sub_type)
+{
+ return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+ void *cmd)
+{
+ struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+ const struct ti_sci_handle *handle = &info->handle;
+
+ ti_sci_cmd_core_reboot(handle);
+
+	/* Whether the call fails or passes, we should never get here: the SoC should have reset */
+ return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+ .default_host_id = 2,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 1000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 64,
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+ .default_host_id = 12,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 10000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 60,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+ {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ const struct ti_sci_desc *desc;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info = NULL;
+ struct ti_sci_xfers_info *minfo;
+ struct mbox_client *cl;
+ int ret = -EINVAL;
+ int i;
+ int reboot = 0;
+ u32 h_id;
+
+ of_id = of_match_device(ti_sci_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->desc = desc;
+ ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
+ /* if the property is not present in DT, use a default from desc */
+ if (ret < 0) {
+ info->host_id = info->desc->default_host_id;
+ } else {
+ if (!h_id) {
+ dev_warn(dev, "Host ID 0 is reserved for firmware\n");
+ info->host_id = info->desc->default_host_id;
+ } else {
+ info->host_id = h_id;
+ }
+ }
+
+ reboot = of_property_read_bool(dev->of_node,
+ "ti,system-reboot-controller");
+ INIT_LIST_HEAD(&info->node);
+ minfo = &info->minfo;
+
+	/*
+	 * Pre-allocate messages.
+	 * NEVER allocate more than what we can indicate in hdr.seq.
+	 * If we have a data description bug, force a fix.
+	 */
+ if (WARN_ON(desc->max_msgs >=
+ 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+ return -EINVAL;
+
+ minfo->xfer_block = devm_kcalloc(dev,
+ desc->max_msgs,
+ sizeof(*minfo->xfer_block),
+ GFP_KERNEL);
+ if (!minfo->xfer_block)
+ return -ENOMEM;
+
+ minfo->xfer_alloc_table = devm_kcalloc(dev,
+ BITS_TO_LONGS(desc->max_msgs),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!minfo->xfer_alloc_table)
+ return -ENOMEM;
+ bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+ /* Pre-initialize the buffer pointer to pre-allocated buffers */
+ for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+ xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+ GFP_KERNEL);
+ if (!xfer->xfer_buf)
+ return -ENOMEM;
+
+ xfer->tx_message.buf = xfer->xfer_buf;
+ init_completion(&xfer->done);
+ }
+
+ ret = ti_sci_debugfs_create(pdev, info);
+ if (ret)
+ dev_warn(dev, "Failed to create debug file\n");
+
+ platform_set_drvdata(pdev, info);
+
+ cl = &info->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->rx_callback = ti_sci_rx_callback;
+ cl->knows_txdone = true;
+
+ spin_lock_init(&minfo->xfer_lock);
+ sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+ info->chan_rx = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(info->chan_rx)) {
+ ret = PTR_ERR(info->chan_rx);
+ goto out;
+ }
+
+ info->chan_tx = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(info->chan_tx)) {
+ ret = PTR_ERR(info->chan_tx);
+ goto out;
+ }
+ ret = ti_sci_cmd_get_revision(info);
+ if (ret) {
+ dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+ goto out;
+ }
+
+ ti_sci_setup_ops(info);
+
+ if (reboot) {
+ info->nb.notifier_call = tisci_reboot_handler;
+ info->nb.priority = 128;
+
+ ret = register_restart_handler(&info->nb);
+ if (ret) {
+ dev_err(dev, "reboot registration fail(%d)\n", ret);
+			goto out;
+ }
+ }
+
+ dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+ info->handle.version.abi_major, info->handle.version.abi_minor,
+ info->handle.version.firmware_revision,
+ info->handle.version.firmware_description);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_add_tail(&info->node, &ti_sci_list);
+ mutex_unlock(&ti_sci_list_mutex);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+ if (!IS_ERR(info->chan_tx))
+ mbox_free_channel(info->chan_tx);
+ if (!IS_ERR(info->chan_rx))
+ mbox_free_channel(info->chan_rx);
+ debugfs_remove(info->d);
+ return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+ .probe = ti_sci_probe,
+ .driver = {
+ .name = "ti-sci",
+ .of_match_table = of_match_ptr(ti_sci_of_match),
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 000000000..57cd04062
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,1417 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT 0x0000
+#define TI_SCI_MSG_WAKE_RESET 0x0001
+#define TI_SCI_MSG_VERSION 0x0002
+#define TI_SCI_MSG_WAKE_REASON 0x0003
+#define TI_SCI_MSG_GOODBYE 0x0004
+#define TI_SCI_MSG_SYS_RESET 0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ 0x1000
+#define TI_SCI_MSG_FREE_IRQ 0x1001
+
+/* NAVSS resource management */
+/* Ringacc requests */
+#define TI_SCI_MSG_RM_RING_ALLOCATE 0x1100
+#define TI_SCI_MSG_RM_RING_FREE 0x1101
+#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
+#define TI_SCI_MSG_RM_RING_RESET 0x1103
+#define TI_SCI_MSG_RM_RING_CFG 0x1110
+#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
+
+/* PSI-L requests */
+#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
+#define TI_SCI_MSG_RM_PSIL_UNPAIR 0x1281
+
+#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC 0x1200
+#define TI_SCI_MSG_RM_UDMAP_TX_FREE 0x1201
+#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC 0x1210
+#define TI_SCI_MSG_RM_UDMAP_RX_FREE 0x1211
+#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG 0x1220
+#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG 0x1221
+
+#define TISCI_MSG_RM_UDMAP_TX_CH_CFG 0x1205
+#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG 0x1206
+#define TISCI_MSG_RM_UDMAP_RX_CH_CFG 0x1215
+#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG 0x1216
+#define TISCI_MSG_RM_UDMAP_FLOW_CFG 0x1230
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG 0x1231
+#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG 0x1232
+#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG 0x1233
+
+/* Processor Control requests */
+#define TI_SCI_MSG_PROC_REQUEST 0xc000
+#define TI_SCI_MSG_PROC_RELEASE 0xc001
+#define TI_SCI_MSG_PROC_HANDOVER 0xc005
+#define TI_SCI_MSG_SET_CONFIG 0xc100
+#define TI_SCI_MSG_SET_CTRL 0xc101
+#define TI_SCI_MSG_GET_STATUS 0xc400
+
+/**
+ * struct ti_sci_msg_hdr - Generic Message Header for all messages and responses
+ * @type: Type of messages: One of TI_SCI_MSG* values
+ * @host: Host of the message
+ * @seq: Message identifier indicating a transfer sequence
+ * @flags: Flag for the message
+ */
+struct ti_sci_msg_hdr {
+ u16 type;
+ u8 host;
+ u8 seq;
+#define TI_SCI_MSG_FLAG(val) (1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1)
+ /* Additional Flags */
+ u32 flags;
+} __packed;
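+
+/*
+ * Illustrative header initialization (a sketch only; the driver populates
+ * these fields when it builds each transfer, host_id and seq below are
+ * placeholders assigned by the transport code):
+ *
+ *	struct ti_sci_msg_hdr hdr = {
+ *		.type	= TI_SCI_MSG_VERSION,
+ *		.host	= host_id,
+ *		.seq	= seq,
+ *		.flags	= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ *	};
+ */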
+
+/**
+ * struct ti_sci_msg_resp_version - Response for a message
+ * @hdr: Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision: Firmware revision
+ * @abi_major: Major version of the ABI that firmware supports
+ * @abi_minor: Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+ struct ti_sci_msg_hdr hdr;
+ char firmware_description[32];
+ u16 firmware_revision;
+ u8 abi_major;
+ u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr: Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+ struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF 0
+#define MSG_DEVICE_SW_STATE_RETENTION 1
+#define MSG_DEVICE_SW_STATE_ON 2
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr: Generic header
+ * @id: Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device state
+ * information
+ */
+struct ti_sci_msg_req_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr: Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ * driver can use this monotonic counter to determine if the device has
+ * lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state: The state as programmed by set_device.
+ * - Uses the MSG_DEVICE_SW_* macros
+ * @current_state: The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 context_loss_count;
+ u32 resets;
+ u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF 0
+#define MSG_DEVICE_HW_STATE_ON 1
+#define MSG_DEVICE_HW_STATE_TRANS 2
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ * configuration of the device
+ * @hdr: Generic header
+ * @id: Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ * and usage of the reset flags are device specific. 0 for a bit
+ * indicates releasing the reset represented by that bit while 1
+ * indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr: Generic Header, Certain flags can be set specific to the clocks:
+ * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ * via spread spectrum clocking.
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ * frequency to be changed while it is running so long as it
+ * is within the min/max limits.
+ * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ * is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ * which clock input to modify. Set to 255 if clock ID is
+ * greater than or equal to 255.
+ * @request_state: Request the state for the clock to be set to.
+ * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ * it can be disabled, regardless of the state of the device
+ * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ * automatically manage the state of this clock. If the device
+ * is enabled, then the clock is enabled. If the device is set
+ * to off or retention, then the clock is internally set as not
+ *		being required by the device (default).
+ * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled,
+ * regardless of the state of the device.
+ * @clk_id_32: Clock identifier for the device for this request.
+ * Only to be used if the clock ID is greater than or equal to
+ * 255.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is used
+ * only for specific control *IF* required. The auto-managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in other states, the TISCI entity assumes the
+ * remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+ /* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10)
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ 0
+#define MSG_CLOCK_SW_STATE_AUTO 1
+#define MSG_CLOCK_SW_STATE_REQ 2
+ u8 request_state;
+ u32 clk_id_32;
+} __packed;
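+
+/*
+ * Illustrative sketch (editorial addition, not part of the TISCI protocol
+ * definition): populating a clock-state request when the clock identifier
+ * may not fit the 8-bit @clk_id field. The helper name is hypothetical.
+ */
+static inline void
+ti_sci_example_fill_clock_state(struct ti_sci_msg_req_set_clock_state *req,
+				u32 dev_id, u32 clk_id, u8 state)
+{
+	req->dev_id = dev_id;
+	if (clk_id < 255) {
+		req->clk_id = clk_id;
+		req->clk_id_32 = 0;
+	} else {
+		/* 255 is the escape value; the real ID goes in clk_id_32 */
+		req->clk_id = 255;
+		req->clk_id_32 = clk_id;
+	}
+	req->request_state = state;	/* one of MSG_CLOCK_SW_STATE_* */
+}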
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get state of. Set to 255 if the clock
+ * ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier for the device for the request.
+ * Only to be used if the clock ID is greater than or equal to
+ * 255.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr: Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ * MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ * MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+ struct ti_sci_msg_hdr hdr;
+ u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY 0
+#define MSG_CLOCK_HW_STATE_READY 1
+ u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify. Set to 255 if clock ID is
+ * greater than or equal to 255.
+ * @parent_id: The new clock parent is selectable by an index via this
+ * parameter. Set to 255 if the parent ID is greater than or
+ * equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is 255.
+ * @parent_id_32: Parent identifier if @parent_id is 255.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u8 parent_id;
+ u32 clk_id_32;
+ u32 parent_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to get the parent for. If this field
+ * contains 255, the actual clock identifier is stored in
+ * @clk_id_32.
+ * @clk_id_32: Clock identifier if the @clk_id field contains 255.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr: Generic Header
+ * @parent_id: The current clock parent. If set to 255, the current parent
+ * ID can be found from the @parent_id_32 field.
+ * @parent_id_32: Current clock parent if @parent_id field is set to
+ * 255.
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+ struct ti_sci_msg_hdr hdr;
+ u8 parent_id;
+ u32 parent_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr: Generic header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if the @clk_id field contains 255.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr: Generic header
+ * @num_parents: Number of clock parents. If set to 255, the actual
+ * number of parents is stored into @num_parents_32
+ * field instead.
+ * @num_parents_32: Number of clock parents if @num_parents field is
+ * set to 255.
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+ struct ti_sci_msg_hdr hdr;
+ u8 num_parents;
+ u32 num_parents_32;
+} __packed;
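+
+/*
+ * Illustrative sketch (editorial addition): reading the parent count from the
+ * response, honouring the 255 escape value described above. The helper name
+ * is hypothetical.
+ */
+static inline u32
+ti_sci_example_num_parents(const struct ti_sci_msg_resp_get_clock_num_parents *resp)
+{
+	return resp->num_parents == 255 ? resp->num_parents_32 :
+					  resp->num_parents;
+}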
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ * as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock identifier is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested frequency within provided range and responds with
+ * result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr: Generic Header
+ * @freq_hz: Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ * at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested range and responds with success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u64 min_freq_hz;
+ u64 target_freq_hz;
+ u64 max_freq_hz;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
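+
+/*
+ * Illustrative sketch (editorial addition): building a set-frequency request
+ * with a symmetric tolerance window around the target. The slack handling is
+ * an assumption for illustration only; callers choose min/max to suit their
+ * clock. The helper name is hypothetical.
+ */
+static inline void
+ti_sci_example_fill_freq_req(struct ti_sci_msg_req_set_clock_freq *req,
+			     u32 dev_id, u32 clk_id, u64 target_hz,
+			     u64 slack_hz)
+{
+	req->dev_id = dev_id;
+	req->min_freq_hz = target_hz - slack_hz;
+	req->target_freq_hz = target_hz;
+	req->max_freq_hz = target_hz + slack_hz;
+	req->clk_id = clk_id < 255 ? clk_id : 255;
+	req->clk_id_32 = clk_id < 255 ? 0 : clk_id;
+}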
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr: Generic Header
+ * @dev_id: Device identifier this request is for
+ * @clk_id: Clock identifier for the device for this request. Set to
+ * 255 if clock ID is greater than or equal to 255.
+ * @clk_id_32: Clock identifier if @clk_id field is set to 255.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In some cases, clock frequencies are configured by host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with the frequency
+ * the clock is currently running at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u32 dev_id;
+ u8 clk_id;
+ u32 clk_id_32;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr: Generic Header
+ * @freq_hz: Frequency the clock is currently running at, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+ struct ti_sci_msg_hdr hdr;
+ u64 freq_hz;
+} __packed;
+
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ * range of resources.
+ * @hdr: Generic Header
+ * @type: Unique resource assignment type
+ * @subtype: Resource assignment subtype within the resource type.
+ * @secondary_host: Host processing entity to which the resources are
+ * allocated. This is required only when the destination
+ * host ID is different from the TI SCI interface host ID;
+ * otherwise TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. Responded with the requested
+ * resource range in a TI_SCI_MSG_GET_RESOURCE_RANGE message.
+ */
+struct ti_sci_msg_req_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK GENMASK(5, 0)
+ u16 type;
+ u8 subtype;
+ u8 secondary_host;
+} __packed;
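+
+/*
+ * Illustrative sketch (editorial addition): only 10 bits of @type and 6 bits
+ * of @subtype are significant, so a caller would typically mask them before
+ * filling the request. The helper name is hypothetical.
+ */
+static inline void
+ti_sci_example_fill_resource_range(struct ti_sci_msg_req_get_resource_range *req,
+				   u16 type, u8 subtype)
+{
+	req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+	/* resources requested for the TI SCI interface host itself */
+	req->secondary_host = TI_SCI_IRQ_SECONDARY_HOST_INVALID;
+}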
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr: Generic Header
+ * @range_start: Start index of the resource range.
+ * @range_num: Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+ u16 range_start;
+ u16 range_num;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ * between the dev and the host.
+ * @hdr: Generic Header
+ * @valid_params: Bit fields defining the validity of interrupt source
+ * parameters. If a bit is not set, the corresponding
+ * field is not valid and will not be used to set the route.
+ * Bit field definitions:
+ * 0 - Valid bit for @dst_id
+ * 1 - Valid bit for @dst_host_irq
+ * 2 - Valid bit for @ia_id
+ * 3 - Valid bit for @vint
+ * 4 - Valid bit for @global_event
+ * 5 - Valid bit for @vint_status_bit_index
+ * 31 - Valid bit for @secondary_host
+ * @src_id: IRQ source peripheral ID.
+ * @src_index: IRQ source index within the peripheral
+ * @dst_id: IRQ Destination ID. Based on the architecture it can be
+ * IRQ controller or host processor ID.
+ * @dst_host_irq: IRQ number of the destination host IRQ controller
+ * @ia_id: Device ID of the interrupt aggregator in which the
+ * vint resides.
+ * @vint: Virtual interrupt number if the interrupt route
+ * is through an interrupt aggregator.
+ * @global_event: Global event that is to be mapped to interrupt
+ * aggregator virtual interrupt status bit.
+ * @vint_status_bit: Virtual interrupt status bit if the interrupt route
+ * utilizes an interrupt aggregator status bit.
+ * @secondary_host: Host ID of the IRQ destination computing entity. This is
+ * required only when the destination host ID is different
+ * from the TI SCI interface host ID.
+ *
+ * Request type is TI_SCI_MSG_SET/RELEASE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID TI_SCI_MSG_FLAG(31)
+ u32 valid_params;
+ u16 src_id;
+ u16 src_index;
+ u16 dst_id;
+ u16 dst_host_irq;
+ u16 ia_id;
+ u16 vint;
+ u16 global_event;
+ u8 vint_status_bit;
+ u8 secondary_host;
+} __packed;
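+
+/*
+ * Illustrative sketch (editorial addition): marking which optional fields are
+ * populated for an event routed through an interrupt aggregator. Only the
+ * valid_params bookkeeping is shown; building the rest of the request and
+ * sending it is outside the scope of this header. The helper name is
+ * hypothetical.
+ */
+static inline void
+ti_sci_example_mark_ia_route(struct ti_sci_msg_req_manage_irq *req,
+			     u16 ia_id, u16 vint, u16 global_event,
+			     u8 vint_status_bit)
+{
+	req->ia_id = ia_id;
+	req->vint = vint;
+	req->global_event = global_event;
+	req->vint_status_bit = vint_status_bit;
+	req->valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+			    MSG_FLAG_GLB_EVNT_VALID |
+			    MSG_FLAG_VINT_STS_BIT_VALID;
+}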
+
+/**
+ * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem ring.
+ * @hdr: Generic Header
+ * @valid_params: Bitfield defining validity of ring configuration parameters.
+ * The ring configuration fields are not valid, and will not be used for
+ * ring configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
+ * 1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
+ * 2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
+ * 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
+ * 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
+ * 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index to be configured.
+ * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
+ * RING_BA_LO register
+ * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
+ * RING_BA_HI register.
+ * @count: Number of ring elements. Must be even if the mode is CREDENTIALS or
+ * QM.
+ * @mode: Specifies the mode in which the ring is to be configured.
+ * @size: Specifies encoded ring element size. To calculate the encoded size use
+ * the formula (log2(size_bytes) - 2), where size_bytes cannot be
+ * greater than 256.
+ * @order_id: Specifies the ring's bus order ID.
+ */
+struct ti_sci_msg_rm_ring_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+} __packed;
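+
+/*
+ * Illustrative sketch (editorial addition): deriving the encoded @size field
+ * from an element size in bytes per the (log2(size_bytes) - 2) formula above.
+ * size_bytes is assumed to be a power of two between 4 and 256. The helper
+ * name is hypothetical.
+ */
+static inline u8 ti_sci_example_ring_elsize(u32 size_bytes)
+{
+	u8 encoded = 0;
+
+	while (size_bytes > 4) {
+		size_bytes >>= 1;
+		encoded++;
+	}
+	return encoded;
+}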
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
+ *
+ * Gets the configuration of the non-real-time register fields of a ring. The
+ * host, or a supervisor of the host, who owns the ring must be the requesting
+ * host. The values of the non-real-time registers are returned in
+ * @ti_sci_msg_rm_ring_get_cfg_resp.
+ *
+ * @hdr: Generic Header
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
+ * @index: ring index.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u16 nav_id;
+ u16 index;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
+ *
+ * Response received by host processor after RM has handled
+ * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
+ * non-real-time register values.
+ *
+ * @hdr: Generic Header
+ * @addr_lo: 32 LSBs of the ring base address.
+ * @addr_hi: 16 MSBs of the ring base address.
+ * @count: Number of ring elements.
+ * @mode: Ring mode.
+ * @size: Encoded ring element size.
+ * @order_id: Ring order ID.
+ */
+struct ti_sci_msg_rm_ring_get_cfg_resp {
+ struct ti_sci_msg_hdr hdr;
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 count;
+ u8 mode;
+ u8 size;
+ u8 order_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
+ * thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to pair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register programmed with the destination thread if the pairing
+ * is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register programmed with the source thread if the pairing
+ * is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_pair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
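+
+/*
+ * Illustrative sketch (editorial addition): destination threads occupy the
+ * 0x8000+ range of the PSI-L thread map, so a caller would typically verify
+ * (or apply) that offset before issuing a pair request. The helper name is
+ * hypothetical.
+ */
+static inline bool ti_sci_example_psil_dst_valid(u32 dst_thread)
+{
+	return dst_thread >= 0x8000;
+}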
+
+/**
+ * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
+ * destination thread
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
+ * used to unpair the source and destination threads.
+ * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
+ *
+ * UDMAP transmit channels mapped to source threads will have their
+ * TCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
+ * PSI-L destination threads start at index 0x8000. The request is NACK'd if
+ * the destination thread is not greater than or equal to 0x8000.
+ *
+ * UDMAP receive channels mapped to destination threads will have their
+ * RCHAN_THRD_ID register cleared if the unpairing is successful.
+ *
+ * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
+ * message.
+ */
+struct ti_sci_msg_psil_unpair {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 src_thread;
+ u32 dst_thread;
+} __packed;
+
+/**
+ * struct ti_sci_msg_udmap_rx_flow_cfg - UDMAP receive flow configuration
+ * message
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_einfo_present: UDMAP receive flow extended packet info present.
+ * @rx_psinfo_present: UDMAP receive flow PS words present.
+ * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
+ * values are TI_SCI_RM_UDMAP_RX_FLOW_ERR_DROP/RETRY.
+ * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
+ * TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST/MONO.
+ * @rx_sop_offset: UDMAP receive flow start of packet offset.
+ * @rx_dest_qnum: UDMAP receive flow destination queue number.
+ * @rx_ps_location: UDMAP receive flow PS words location.
+ * 0 - end of packet descriptor
+ * 1 - Beginning of the data buffer
+ * @rx_src_tag_hi: UDMAP receive flow source tag high byte constant
+ * @rx_src_tag_lo: UDMAP receive flow source tag low byte constant
+ * @rx_dest_tag_hi: UDMAP receive flow destination tag high byte constant
+ * @rx_dest_tag_lo: UDMAP receive flow destination tag low byte constant
+ * @rx_src_tag_hi_sel: UDMAP receive flow source tag high byte selector
+ * @rx_src_tag_lo_sel: UDMAP receive flow source tag low byte selector
+ * @rx_dest_tag_hi_sel: UDMAP receive flow destination tag high byte selector
+ * @rx_dest_tag_lo_sel: UDMAP receive flow destination tag low byte selector
+ * @rx_size_thresh_en: UDMAP receive flow packet size based free buffer queue
+ * enable. If enabled, the ti_sci_rm_udmap_rx_flow_opt_cfg also needs to be
+ * configured and sent.
+ * @rx_fdq0_sz0_qnum: UDMAP receive flow free descriptor queue 0.
+ * @rx_fdq1_qnum: UDMAP receive flow free descriptor queue 1.
+ * @rx_fdq2_qnum: UDMAP receive flow free descriptor queue 2.
+ * @rx_fdq3_qnum: UDMAP receive flow free descriptor queue 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct ti_sci_msg_udmap_rx_flow_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_ps_location;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u8 rx_size_thresh_en;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+} __packed;
+
+/**
+ * struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg - parameters for UDMAP receive
+ * flow optional configuration
+ * @hdr: Generic Header
+ * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
+ * allocated
+ * @flow_index: UDMAP receive flow index for optional configuration.
+ * @rx_ch_index: Specifies the index of the receive channel using the flow_index
+ * @rx_size_thresh0: UDMAP receive flow packet size threshold 0.
+ * @rx_size_thresh1: UDMAP receive flow packet size threshold 1.
+ * @rx_size_thresh2: UDMAP receive flow packet size threshold 2.
+ * @rx_fdq0_sz1_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 1.
+ * @rx_fdq0_sz2_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 2.
+ * @rx_fdq0_sz3_qnum: UDMAP receive flow free descriptor queue for size
+ * threshold 3.
+ *
+ * For detailed information on the settings, see the UDMAP section of the TRM.
+ */
+struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
+ struct ti_sci_msg_hdr hdr;
+ u32 nav_id;
+ u32 flow_index;
+ u32 rx_ch_index;
+ u16 rx_size_thresh0;
+ u16 rx_size_thresh1;
+ u16 rx_size_thresh2;
+ u16 rx_fdq0_sz1_qnum;
+ u16 rx_fdq0_sz2_qnum;
+ u16 rx_fdq0_sz3_qnum;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_tx_ch_cfg_req - Configure a Navigator Subsystem
+ * UDMAP transmit channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * transmit channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of tx channel configuration
+ * parameters. The tx channel configuration fields are not valid, and will not
+ * be used for ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err
+ * 1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype
+ * 2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type
+ * 3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size
+ * 4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum
+ * 5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority
+ * 6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos
+ * 7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid
+ * 8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority
+ * 9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo
+ * 10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords
+ * 11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt
+ * 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
+ * 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
+ * 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
+ *
+ * @index: UDMAP transmit channel index.
+ *
+ * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to
+ * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG
+ * register.
+ *
+ * @tx_filt_einfo: UDMAP transmit channel extended packet information passing
+ * configuration to be programmed into the tx_filt_einfo field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing
+ * configuration to be programmed into the tx_filt_pswords field of the
+ * channel's TCHAN_TCFG register.
+ *
+ * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the tx_atype field of
+ * the channel's TCHAN_TCFG register.
+ *
+ * @tx_chan_type: UDMAP transmit channel functional channel type and work
+ * passing mechanism configuration to be programmed into the tx_chan_type
+ * field of the channel's TCHAN_TCFG register.
+ *
+ * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression
+ * configuration to be programmed into the tx_supr_tdpkt field of the channel's
+ * TCHAN_TCFG register.
+ *
+ * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the tx_fetch_size field of the
+ * channel's TCHAN_TCFG register. The user must make sure to set the maximum
+ * word count that can pass through the channel for any allowed descriptor type.
+ *
+ * @tx_credit_count: UDMAP transmit channel transfer request credit count
+ * configuration to be programmed into the count field of the TCHAN_TCREDIT
+ * register. Specifies how many credits for complete TRs are available.
+ *
+ * @txcq_qnum: UDMAP transmit channel completion queue configuration to be
+ * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified
+ * completion queue must be assigned to the host, or a subordinate of the host,
+ * requesting configuration of the transmit channel.
+ *
+ * @tx_priority: UDMAP transmit channel transmit priority value to be programmed
+ * into the priority field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the
+ * qos field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into
+ * the orderid field of the channel's TCHAN_TPRI_CTRL register.
+ *
+ * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed
+ * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of
+ * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP
+ * section of the TRM for restrictions regarding this parameter.
+ *
+ * @tx_sched_priority: UDMAP transmit channel tx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * TCHAN_TST_SCHED register.
+ *
+ * @tx_burst_size: UDMAP transmit channel burst size configuration to be
+ * programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u8 tx_pause_on_err;
+ u8 tx_filt_einfo;
+ u8 tx_filt_pswords;
+ u8 tx_atype;
+ u8 tx_chan_type;
+ u8 tx_supr_tdpkt;
+ u16 tx_fetch_size;
+ u8 tx_credit_count;
+ u16 txcq_qnum;
+ u8 tx_priority;
+ u8 tx_qos;
+ u8 tx_orderid;
+ u16 fdepth;
+ u8 tx_sched_priority;
+ u8 tx_burst_size;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_rx_ch_cfg_req - Configure a Navigator Subsystem
+ * UDMAP receive channel
+ *
+ * Configures the non-real-time registers of a Navigator Subsystem UDMAP
+ * receive channel. The channel index must be assigned to the host defined
+ * in the TISCI header via the RM board configuration resource assignment
+ * range list.
+ *
+ * @hdr: Generic Header
+ *
+ * @valid_params: Bitfield defining validity of rx channel configuration
+ * parameters.
+ * The rx channel configuration fields are not valid, and will not be used for
+ * ch configuration, if their corresponding valid bit is zero.
+ * Valid bit usage:
+ * 0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err
+ * 1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype
+ * 2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type
+ * 3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size
+ * 4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum
+ * 5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority
+ * 6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos
+ * 7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid
+ * 8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority
+ * 9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start
+ * 10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt
+ * 11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short
+ * 12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long
+ * 14 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_burst_size
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located
+ *
+ * @index: UDMAP receive channel index.
+ *
+ * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
+ * fetch configuration to be programmed into the rx_fetch_size field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
+ * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
+ * The specified completion queue must be assigned to the host, or a subordinate
+ * of the host, requesting configuration of the receive channel.
+ *
+ * @rx_priority: UDMAP receive channel receive priority value to be programmed
+ * into the priority field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
+ * qos field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
+ * the orderid field of the channel's RCHAN_RPRI_CTRL register.
+ *
+ * @rx_sched_priority: UDMAP receive channel rx scheduling priority
+ * configuration to be programmed into the priority field of the channel's
+ * RCHAN_RST_SCHED register.
+ *
+ * @flowid_start: UDMAP receive channel additional flows starting index
+ * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
+ * register. Specifies the starting index for flow IDs the receive channel is to
+ * make use of beyond the default flow. flowid_start and @ref flowid_cnt must be
+ * set as valid and configured together. The starting flow ID set by
+ * @ref flowid_start must be a flow index within the Navigator Subsystem's subset
+ * of flows beyond the default flows statically mapped to receive channels.
+ * The additional flows must be assigned to the host, or a subordinate of the
+ * host, requesting configuration of the receive channel.
+ *
+ * @flowid_cnt: UDMAP receive channel additional flows count configuration to
+ * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
+ * This field specifies how many flow IDs are in the additional contiguous range
+ * of legal flow IDs for the channel. @ref flowid_start and flowid_cnt must be
+ * set as valid and configured together. Disabling the valid_params field bit
+ * for flowid_cnt indicates no flow IDs other than the default are to be
+ * allocated and used by the receive channel. @ref flowid_start plus flowid_cnt
+ * cannot be greater than the number of receive flows in the receive channel's
+ * Navigator Subsystem. The additional flows must be assigned to the host, or a
+ * subordinate of the host, requesting configuration of the receive channel.
+ *
+ * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
+ * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
+ * register.
+ *
+ * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
+ * interpretation configuration to be programmed into the rx_atype field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_chan_type: UDMAP receive channel functional channel type and work passing
+ * mechanism configuration to be programmed into the rx_chan_type field of the
+ * channel's RCHAN_RCFG register.
+ *
+ * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
+ * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
+ *
+ * @rx_ignore_long: UDMAP receive channel long packet treatment configuration to
+ * be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
+ *
+ * @rx_burst_size: UDMAP receive channel burst size configuration to be
+ * programmed into the rx_burst_size field of the RCHAN_RCFG register.
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 index;
+ u16 rx_fetch_size;
+ u16 rxcq_qnum;
+ u8 rx_priority;
+ u8 rx_qos;
+ u8 rx_orderid;
+ u8 rx_sched_priority;
+ u16 flowid_start;
+ u16 flowid_cnt;
+ u8 rx_pause_on_err;
+ u8 rx_atype;
+ u8 rx_chan_type;
+ u8 rx_ignore_short;
+ u8 rx_ignore_long;
+ u8 rx_burst_size;
+} __packed;
+
+/**
+ * struct ti_sci_msg_rm_udmap_flow_cfg_req - Configure a Navigator Subsystem
+ * UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * Configuration does not include the flow registers which handle size-based
+ * free descriptor queue routing.
+ *
+ * The flow index must be assigned to the host defined in the TISCI header via
+ * the RM board configuration resource assignment range list.
+ *
+ * @hdr: Standard TISCI header
+ *
+ * @valid_params:
+ * Bitfield defining validity of rx flow configuration parameters. The
+ * rx flow configuration fields are not valid, and will not be used for flow
+ * configuration, if their corresponding valid bit is zero. Valid bit usage:
+ * 0 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
+ * 1 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
+ * 2 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_error_handling
+ * 3 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_desc_type
+ * 4 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
+ * 5 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
+ * 6 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
+ * 7 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
+ * 8 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
+ * 9 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
+ * 10 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
+ * 11 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
+ * 12 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
+ * 13 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
+ * 14 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
+ * 15 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq1_sz0_qnum
+ * 16 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq2_sz0_qnum
+ * 17 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_fdq3_sz0_qnum
+ * 18 - Valid bit for @tisci_msg_rm_udmap_flow_cfg_req::rx_ps_location
+ *
+ * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow is
+ * allocated
+ *
+ * @flow_index: UDMAP receive flow index for non-optional configuration.
+ *
+ * @rx_einfo_present:
+ * UDMAP receive flow extended packet info present configuration to be
+ * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_psinfo_present:
+ * UDMAP receive flow PS words present configuration to be programmed into the
+ * rx_psinfo_present field of the flow's RFLOW_RFA register.
+ *
+ * @rx_error_handling:
+ * UDMAP receive flow error handling configuration to be programmed into the
+ * rx_error_handling field of the flow's RFLOW_RFA register.
+ *
+ * @rx_desc_type:
+ * UDMAP receive flow descriptor type configuration to be programmed into the
+ * rx_desc_type field of the flow's RFLOW_RFA register.
+ *
+ * @rx_sop_offset:
+ * UDMAP receive flow start of packet offset configuration to be programmed
+ * into the rx_sop_offset field of the RFLOW_RFA register. See the UDMAP
+ * section of the TRM for more information on this setting. Valid values for
+ * this field are 0-255 bytes.
+ *
+ * @rx_dest_qnum:
+ * UDMAP receive flow destination queue configuration to be programmed into the
+ * rx_dest_qnum field of the flow's RFLOW_RFA register. The specified
+ * destination queue must be valid within the Navigator Subsystem and must be
+ * owned by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_src_tag_hi:
+ * UDMAP receive flow source tag high byte constant configuration to be
+ * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo:
+ * UDMAP receive flow source tag low byte constant configuration to be
+ * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi:
+ * UDMAP receive flow destination tag high byte constant configuration to be
+ * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo:
+ * UDMAP receive flow destination tag low byte constant configuration to be
+ * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register.
+ * See the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_hi_sel:
+ * UDMAP receive flow source tag high byte selector configuration to be
+ * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_src_tag_lo_sel:
+ * UDMAP receive flow source tag low byte selector configuration to be
+ * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_hi_sel:
+ * UDMAP receive flow destination tag high byte selector configuration to be
+ * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_dest_tag_lo_sel:
+ * UDMAP receive flow destination tag low byte selector configuration to be
+ * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register. See
+ * the UDMAP section of the TRM for more information on this setting.
+ *
+ * @rx_fdq0_sz0_qnum:
+ * UDMAP receive flow free descriptor queue 0 configuration to be programmed
+ * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq1_qnum:
+ * UDMAP receive flow free descriptor queue 1 configuration to be programmed
+ * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq2_qnum:
+ * UDMAP receive flow free descriptor queue 2 configuration to be programmed
+ * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_fdq3_qnum:
+ * UDMAP receive flow free descriptor queue 3 configuration to be programmed
+ * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register. See the
+ * UDMAP section of the TRM for more information on this setting. The specified
+ * free queue must be valid within the Navigator Subsystem and must be owned
+ * by the host, or a subordinate of the host, requesting allocation and
+ * configuration of the receive flow.
+ *
+ * @rx_ps_location:
+ * UDMAP receive flow PS words location configuration to be programmed into the
+ * rx_ps_location field of the flow's RFLOW_RFA register.
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg_req {
+ struct ti_sci_msg_hdr hdr;
+ u32 valid_params;
+ u16 nav_id;
+ u16 flow_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+ u8 rx_ps_location;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_request - Request a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being requested
+ *
+ * Request type is TI_SCI_MSG_PROC_REQUEST, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_request {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_release - Release a processor
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being released
+ *
+ * Request type is TI_SCI_MSG_PROC_RELEASE, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_release {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_proc_handover - Handover a processor to a host
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being handed over
+ * @host_id: Host ID the control needs to be transferred to
+ *
+ * Request type is TI_SCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_proc_handover {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u8 host_id;
+} __packed;
+
+/* Boot Vector masks */
+#define TI_SCI_ADDR_LOW_MASK GENMASK_ULL(31, 0)
+#define TI_SCI_ADDR_HIGH_MASK GENMASK_ULL(63, 32)
+#define TI_SCI_ADDR_HIGH_SHIFT 32
+
+/**
+ * struct ti_sci_msg_req_set_config - Set Processor boot configuration
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags_set: Optional Processor specific Config Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @config_flags_clear: Optional Processor specific Config Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CONFIG, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_config {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags_set;
+ u32 config_flags_clear;
+} __packed;
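+
+/*
+ * Illustrative sketch (editorial addition): splitting a 64-bit boot vector
+ * into the two 32-bit message fields using the masks defined above. The
+ * helper name is hypothetical.
+ */
+static inline void
+ti_sci_example_set_bootvector(struct ti_sci_msg_req_set_config *req,
+			      u64 boot_vector)
+{
+	req->bootvector_low = boot_vector & TI_SCI_ADDR_LOW_MASK;
+	req->bootvector_high = (boot_vector & TI_SCI_ADDR_HIGH_MASK) >>
+			       TI_SCI_ADDR_HIGH_SHIFT;
+}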
+
+/**
+ * struct ti_sci_msg_req_set_ctrl - Set Processor boot control flags
+ * @hdr: Generic Header
+ * @processor_id: ID of processor being configured
+ * @control_flags_set: Optional Processor specific Control Flags to set.
+ * Setting a bit here implies the corresponding mode
+ * will be set
+ * @control_flags_clear: Optional Processor specific Control Flags to clear.
+ * Setting a bit here implies the corresponding mode
+ * will be cleared
+ *
+ * Request type is TI_SCI_MSG_SET_CTRL, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_ctrl {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 control_flags_set;
+ u32 control_flags_clear;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_status - Processor boot status request
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is being requested
+ *
+ * Request type is TI_SCI_MSG_GET_STATUS, response is an appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_status - Processor boot status response
+ * @hdr: Generic Header
+ * @processor_id: ID of processor whose status is returned
+ * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
+ * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
+ * @config_flags: Optional Processor specific Config Flags set currently
+ * @control_flags: Optional Processor specific Control Flags set currently
+ * @status_flags: Optional Processor specific Status Flags set currently
+ *
+ * Response structure to a TI_SCI_MSG_GET_STATUS request.
+ */
+struct ti_sci_msg_resp_get_status {
+ struct ti_sci_msg_hdr hdr;
+ u8 processor_id;
+ u32 bootvector_low;
+ u32 bootvector_high;
+ u32 config_flags;
+ u32 control_flags;
+ u32 status_flags;
+} __packed;
+
+#endif /* __TI_SCI_H */
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 000000000..1389fa941
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+#define TF_CACHE_REENABLE 4
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_NONE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tf_idle_mode = mode;
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 enable_op, l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ switch (tf_idle_mode) {
+ case TF_PM_MODE_LP2:
+ enable_op = TF_CACHE_REENABLE;
+ break;
+
+ default:
+ enable_op = TF_CACHE_ENABLE;
+ break;
+ }
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, enable_op,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
new file mode 100644
index 000000000..0bef98858
--- /dev/null
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox rWTM firmware driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/hw_random.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "turris-mox-rwtm"
+
+/*
+ * The macros and constants below come from Turris Mox's rWTM firmware code.
+ * This firmware is open source and it's sources can be found at
+ * This firmware is open source and its sources can be found at
+ */
+
+#define MBOX_STS_SUCCESS (0 << 30)
+#define MBOX_STS_FAIL (1 << 30)
+#define MBOX_STS_BADCMD (2 << 30)
+#define MBOX_STS_ERROR(s) ((s) & (3 << 30))
+#define MBOX_STS_VALUE(s) (((s) >> 10) & 0xfffff)
+#define MBOX_STS_CMD(s) ((s) & 0x3ff)
+
+enum mbox_cmd {
+ MBOX_CMD_GET_RANDOM = 1,
+ MBOX_CMD_BOARD_INFO = 2,
+ MBOX_CMD_ECDSA_PUB_KEY = 3,
+ MBOX_CMD_HASH = 4,
+ MBOX_CMD_SIGN = 5,
+ MBOX_CMD_VERIFY = 6,
+
+ MBOX_CMD_OTP_READ = 7,
+ MBOX_CMD_OTP_WRITE = 8,
+};
+
+struct mox_kobject;
+
+struct mox_rwtm {
+ struct device *dev;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox;
+ struct mox_kobject *kobj;
+ struct hwrng hwrng;
+
+ struct armada_37xx_rwtm_rx_msg reply;
+
+ void *buf;
+ dma_addr_t buf_phys;
+
+ struct mutex busy;
+ struct completion cmd_done;
+
+ /* board information */
+ int has_board_info;
+ u64 serial_number;
+ int board_version, ram_size;
+ u8 mac_address1[6], mac_address2[6];
+
+ /* public key burned in eFuse */
+ int has_pubkey;
+ u8 pubkey[135];
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Signature process. This is currently done via debugfs, because it
+ * does not conform to the sysfs standard "one file per attribute".
+ * It should be rewritten via crypto API once akcipher API is available
+ * from userspace.
+ */
+ struct dentry *debugfs_root;
+ u32 last_sig[34];
+ int last_sig_done;
+#endif
+};
+
+struct mox_kobject {
+ struct kobject kobj;
+ struct mox_rwtm *rwtm;
+};
+
+static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
+{
+ return &rwtm->kobj->kobj;
+}
+
+static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
+{
+ return container_of(kobj, struct mox_kobject, kobj)->rwtm;
+}
+
+static void mox_kobj_release(struct kobject *kobj)
+{
+ kfree(to_rwtm(kobj)->kobj);
+}
+
+static struct kobj_type mox_kobj_ktype = {
+ .release = mox_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static int mox_kobj_create(struct mox_rwtm *rwtm)
+{
+ rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
+ if (!rwtm->kobj)
+ return -ENOMEM;
+
+ kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
+ if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
+ kobject_put(rwtm_to_kobj(rwtm));
+ return -ENXIO;
+ }
+
+ rwtm->kobj->rwtm = rwtm;
+
+ return 0;
+}
+
+#define MOX_ATTR_RO(name, format, cat) \
+static ssize_t \
+name##_show(struct kobject *kobj, struct kobj_attribute *a, \
+ char *buf) \
+{ \
+ struct mox_rwtm *rwtm = to_rwtm(kobj); \
+ if (!rwtm->has_##cat) \
+ return -ENODATA; \
+ return sprintf(buf, format, rwtm->name); \
+} \
+static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
+
+MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
+MOX_ATTR_RO(board_version, "%i\n", board_info);
+MOX_ATTR_RO(ram_size, "%i\n", board_info);
+MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
+MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
+MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+
+static int mox_get_status(enum mbox_cmd cmd, u32 retval)
+{
+ if (MBOX_STS_CMD(retval) != cmd)
+ return -EIO;
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
+ return -(int)MBOX_STS_VALUE(retval);
+ else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
+ return -ENOSYS;
+ else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
+ return -EIO;
+ else
+ return MBOX_STS_VALUE(retval);
+}
+
+static const struct attribute *mox_rwtm_attrs[] = {
+ &mox_attr_serial_number.attr,
+ &mox_attr_board_version.attr,
+ &mox_attr_ram_size.attr,
+ &mox_attr_mac_address1.attr,
+ &mox_attr_mac_address2.attr,
+ &mox_attr_pubkey.attr,
+ NULL
+};
+
+static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
+{
+ struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
+ struct armada_37xx_rwtm_rx_msg *msg = data;
+
+ rwtm->reply = *msg;
+ complete(&rwtm->cmd_done);
+}
+
+static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
+{
+ mac[0] = t1 >> 8;
+ mac[1] = t1;
+ mac[2] = t2 >> 24;
+ mac[3] = t2 >> 16;
+ mac[4] = t2 >> 8;
+ mac[5] = t2;
+}
+
+static int mox_get_board_info(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ int ret;
+
+ msg.command = MBOX_CMD_BOARD_INFO;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+		return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
+ if (ret == -ENODATA) {
+ dev_warn(rwtm->dev,
+ "Board does not have manufacturing information burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the BOARD_INFO command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ rwtm->serial_number = reply->status[1];
+ rwtm->serial_number <<= 32;
+ rwtm->serial_number |= reply->status[0];
+ rwtm->board_version = reply->status[2];
+ rwtm->ram_size = reply->status[3];
+ reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
+ reply->status[5]);
+ reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
+ reply->status[7]);
+ rwtm->has_board_info = 1;
+
+ pr_info("Turris Mox serial number %016llX\n",
+ rwtm->serial_number);
+ pr_info(" board version %i\n", rwtm->board_version);
+ pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
+ }
+
+ msg.command = MBOX_CMD_ECDSA_PUB_KEY;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+	if (!wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2))
+		return -ETIMEDOUT;
+
+ ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
+ if (ret == -ENODATA) {
+ dev_warn(rwtm->dev, "Board has no public key burned!\n");
+ } else if (ret == -ENOSYS) {
+ dev_notice(rwtm->dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ u32 *s = reply->status;
+
+ rwtm->has_pubkey = 1;
+ sprintf(rwtm->pubkey,
+ "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
+ ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
+ s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
+ }
+
+ return 0;
+}
+
+static int check_get_random_support(struct mox_rwtm *rwtm)
+{
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = 4;
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+}
+
+static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
+ struct armada_37xx_rwtm_tx_msg msg;
+ int ret;
+
+ if (max > 4096)
+ max = 4096;
+
+ msg.command = MBOX_CMD_GET_RANDOM;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
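+ /* round the requested length up to a multiple of 4 bytes */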
+ msg.args[2] = (max + 3) & ~3;
+
+ if (!wait) {
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+ } else {
+ mutex_lock(&rwtm->busy);
+ }
+
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ memcpy(data, rwtm->buf, max);
+ ret = max;
+
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rwtm_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ ssize_t ret;
+
+ /* only allow one read, of 136 bytes, from position 0 */
+ if (*ppos != 0)
+ return 0;
+
+ if (len < 136)
+ return -EINVAL;
+
+ if (!rwtm->last_sig_done)
+ return -ENODATA;
+
+ /* 2 arrays of 17 32-bit words are 136 bytes */
+ ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
+ rwtm->last_sig_done = 0;
+
+ return ret;
+}
+
+static ssize_t do_sign_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct mox_rwtm *rwtm = file->private_data;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct armada_37xx_rwtm_tx_msg msg;
+ loff_t dummy = 0;
+ ssize_t ret;
+
+ /* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
+ if (len != 64)
+ return -EINVAL;
+
+ /* if the previous signature has not been read yet, refuse a new request */
+ if (rwtm->last_sig_done)
+ return -EBUSY;
+
+ if (!mutex_trylock(&rwtm->busy))
+ return -EBUSY;
+
+ /*
+ * Here we have to send:
+ * 1. Address of the input to sign.
+ * The input is an array of 17 32-bit words, the first (most
+ * significant) is 0, the remaining 16 words are copied from the SHA-512
+ * hash given by the user and converted from BE to LE.
+ * 2. Address of the buffer where ECDSA signature value R shall be
+ * stored by the rWTM firmware.
+ * 3. Address of the buffer where ECDSA signature value S shall be
+ * stored by the rWTM firmware.
+ */
+ memset(rwtm->buf, 0, 4);
+ ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
+ if (ret < 0)
+ goto unlock_mutex;
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
+
+ msg.command = MBOX_CMD_SIGN;
+ msg.args[0] = 1;
+ msg.args[1] = rwtm->buf_phys;
+ msg.args[2] = rwtm->buf_phys + 68;
+ msg.args[3] = rwtm->buf_phys + 2 * 68;
+ ret = mbox_send_message(rwtm->mbox, &msg);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = wait_for_completion_interruptible(&rwtm->cmd_done);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ ret = MBOX_STS_VALUE(reply->retval);
+ if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
+ goto unlock_mutex;
+
+ /*
+ * Here we read the R and S values of the ECDSA signature
+ * computed by the rWTM firmware and convert their words from
+ * LE to BE.
+ */
+ memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
+ cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
+ rwtm->last_sig_done = 1;
+
+ mutex_unlock(&rwtm->busy);
+ return len;
+unlock_mutex:
+ mutex_unlock(&rwtm->busy);
+ return ret;
+}
+
+static const struct file_operations do_sign_fops = {
+ .owner = THIS_MODULE,
+ .open = rwtm_debug_open,
+ .read = do_sign_read,
+ .write = do_sign_write,
+ .llseek = no_llseek,
+};
+
+static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("turris-mox-rwtm", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
+ &do_sign_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ rwtm->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+ debugfs_remove_recursive(rwtm->debugfs_root);
+}
+#else
+static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
+{
+ return 0;
+}
+
+static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
+{
+}
+#endif
+
+static int turris_mox_rwtm_probe(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
+ if (!rwtm)
+ return -ENOMEM;
+
+ rwtm->dev = dev;
+ rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
+ GFP_KERNEL);
+ if (!rwtm->buf)
+ return -ENOMEM;
+
+ ret = mox_kobj_create(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
+ return ret;
+ }
+
+ ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ if (ret < 0) {
+ dev_err(dev, "Cannot create sysfs files!\n");
+ goto put_kobj;
+ }
+
+ platform_set_drvdata(pdev, rwtm);
+
+ mutex_init(&rwtm->busy);
+
+ rwtm->mbox_client.dev = dev;
+ rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
+
+ rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
+ if (IS_ERR(rwtm->mbox)) {
+ ret = PTR_ERR(rwtm->mbox);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot request mailbox channel: %i\n",
+ ret);
+ goto remove_files;
+ }
+
+ init_completion(&rwtm->cmd_done);
+
+ ret = mox_get_board_info(rwtm);
+ if (ret < 0)
+ dev_warn(dev, "Cannot read board information: %i\n", ret);
+
+ ret = check_get_random_support(rwtm);
+ if (ret < 0) {
+ dev_notice(dev,
+ "Firmware does not support the GET_RANDOM command\n");
+ goto free_channel;
+ }
+
+ rwtm->hwrng.name = DRIVER_NAME "_hwrng";
+ rwtm->hwrng.read = mox_hwrng_read;
+ rwtm->hwrng.priv = (unsigned long) rwtm;
+ rwtm->hwrng.quality = 1024;
+
+ ret = devm_hwrng_register(dev, &rwtm->hwrng);
+ if (ret < 0) {
+ dev_err(dev, "Cannot register HWRNG: %i\n", ret);
+ goto free_channel;
+ }
+
+ ret = rwtm_register_debugfs(rwtm);
+ if (ret < 0) {
+ dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
+ goto free_channel;
+ }
+
+ dev_info(dev, "HWRNG successfully registered\n");
+
+ return 0;
+
+free_channel:
+ mbox_free_channel(rwtm->mbox);
+remove_files:
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+put_kobj:
+ kobject_put(rwtm_to_kobj(rwtm));
+ return ret;
+}
+
+static int turris_mox_rwtm_remove(struct platform_device *pdev)
+{
+ struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
+
+ rwtm_unregister_debugfs(rwtm);
+ sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
+ kobject_put(rwtm_to_kobj(rwtm));
+ mbox_free_channel(rwtm->mbox);
+
+ return 0;
+}
+
+static const struct of_device_id turris_mox_rwtm_match[] = {
+ { .compatible = "cznic,turris-mox-rwtm", },
+ { .compatible = "marvell,armada-3700-rwtm-firmware", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
+
+static struct platform_driver turris_mox_rwtm_driver = {
+ .probe = turris_mox_rwtm_probe,
+ .remove = turris_mox_rwtm_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = turris_mox_rwtm_match,
+ },
+};
+module_platform_driver(turris_mox_rwtm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
new file mode 100644
index 000000000..9a9bd1908
--- /dev/null
+++ b/drivers/firmware/xilinx/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+# Kconfig for Xilinx firmwares
+
+menu "Zynq MPSoC Firmware Drivers"
+ depends on ARCH_ZYNQMP
+
+config ZYNQMP_FIRMWARE
+ bool "Enable Xilinx Zynq MPSoC firmware interface"
+ depends on ARCH_ZYNQMP
+ default y if ARCH_ZYNQMP
+ select MFD_CORE
+ help
+ The firmware interface driver is used by other
+ drivers to communicate with the firmware for
+ various platform management services.
+ Say yes to enable the ZynqMP firmware interface driver.
+ If in doubt, say N.
+
+config ZYNQMP_FIRMWARE_DEBUG
+ bool "Enable Xilinx Zynq MPSoC firmware debug APIs"
+ depends on ZYNQMP_FIRMWARE && DEBUG_FS
+ help
+ Say yes to enable ZynqMP firmware interface debug APIs.
+ If in doubt, say N.
+
+endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
new file mode 100644
index 000000000..875a53703
--- /dev/null
+++ b/drivers/firmware/xilinx/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Xilinx firmwares
+
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
new file mode 100644
index 000000000..99606b349
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer for debugfs APIs
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include "zynqmp-debug.h"
+
+#define PM_API_NAME_LEN 50
+
+struct pm_api_info {
+ u32 api_id;
+ char api_name[PM_API_NAME_LEN];
+ char api_name_len;
+};
+
+static char debugfs_buf[PAGE_SIZE];
+
+#define PM_API(id) {id, #id, strlen(#id)}
+static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_GET_API_VERSION),
+ PM_API(PM_QUERY_DATA),
+};
+
+static struct dentry *firmware_debugfs_root;
+
+/**
+ * zynqmp_pm_argument_value() - Extract argument value from a PM-API request
+ * @arg: Entered PM-API argument in string format
+ *
+ * Return: Argument value in unsigned integer format on success
+ * 0 otherwise
+ */
+static u64 zynqmp_pm_argument_value(char *arg)
+{
+ u64 value;
+
+ if (!arg)
+ return 0;
+
+ if (!kstrtou64(arg, 0, &value))
+ return value;
+
+ return 0;
+}
+
+/**
+ * get_pm_api_id() - Extract API-ID from a PM-API request
+ * @pm_api_req: Entered PM-API argument in string format
+ * @pm_id: API-ID
+ *
+ * Return: 0 on success else error code
+ */
+static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm_api_list) ; i++) {
+ if (!strncasecmp(pm_api_req, pm_api_list[i].api_name,
+ pm_api_list[i].api_name_len)) {
+ *pm_id = pm_api_list[i].api_id;
+ break;
+ }
+ }
+
+ /* If no name was entered, look for a PM-API ID instead */
+ if (i == ARRAY_SIZE(pm_api_list) && kstrtouint(pm_api_req, 10, pm_id))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
+{
+ u32 pm_api_version;
+ int ret;
+ struct zynqmp_pm_query_data qdata = {0};
+
+ switch (pm_id) {
+ case PM_GET_API_VERSION:
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
+ pm_api_version >> 16, pm_api_version & 0xffff);
+ break;
+ case PM_QUERY_DATA:
+ qdata.qid = pm_api_arg[0];
+ qdata.arg1 = pm_api_arg[1];
+ qdata.arg2 = pm_api_arg[2];
+ qdata.arg3 = pm_api_arg[3];
+
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
+ if (ret)
+ break;
+
+ switch (qdata.qid) {
+ case PM_QID_CLOCK_GET_NAME:
+ sprintf(debugfs_buf, "Clock name = %s\n",
+ (char *)pm_api_ret);
+ break;
+ case PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS:
+ sprintf(debugfs_buf, "Multiplier = %d, Divider = %d\n",
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ default:
+ sprintf(debugfs_buf,
+ "data[0] = 0x%08x\ndata[1] = 0x%08x\n data[2] = 0x%08x\ndata[3] = 0x%08x\n",
+ pm_api_ret[0], pm_api_ret[1],
+ pm_api_ret[2], pm_api_ret[3]);
+ }
+ break;
+ default:
+ sprintf(debugfs_buf, "Unsupported PM-API request\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_debugfs_api_write() - debugfs write function
+ * @file: User file
+ * @ptr: User entered PM-API string
+ * @len: Length of the userspace buffer
+ * @off: Offset within the file
+ *
+ * Used for triggering pm api functions by writing
+ * echo <pm_api_id> > /sys/kernel/debug/zynqmp_pm/power or
+ * echo <pm_api_name> > /sys/kernel/debug/zynqmp_pm/power
+ *
+ * Return: Number of bytes copied if PM-API request succeeds,
+ * the corresponding error code otherwise
+ */
+static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
+ const char __user *ptr, size_t len,
+ loff_t *off)
+{
+ char *kern_buff, *tmp_buff;
+ char *pm_api_req;
+ u32 pm_id = 0;
+ u64 pm_api_arg[4] = {0, 0, 0, 0};
+ /* Return values from PM APIs calls */
+ u32 pm_api_ret[4] = {0, 0, 0, 0};
+
+ int ret;
+ int i = 0;
+
+ strcpy(debugfs_buf, "");
+
+ if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ kern_buff = memdup_user_nul(ptr, len);
+ if (IS_ERR(kern_buff))
+ return PTR_ERR(kern_buff);
+ tmp_buff = kern_buff;
+
+ /* Read the API name from a user request */
+ pm_api_req = strsep(&kern_buff, " ");
+
+ ret = get_pm_api_id(pm_api_req, &pm_id);
+ if (ret < 0)
+ goto err;
+
+ /* Read node_id and arguments from the PM-API request */
+ pm_api_req = strsep(&kern_buff, " ");
+ while ((i < ARRAY_SIZE(pm_api_arg)) && pm_api_req) {
+ pm_api_arg[i++] = zynqmp_pm_argument_value(pm_api_req);
+ pm_api_req = strsep(&kern_buff, " ");
+ }
+
+ ret = process_api_request(pm_id, pm_api_arg, pm_api_ret);
+
+err:
+ kfree(tmp_buff);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/**
+ * zynqmp_pm_debugfs_api_read() - debugfs read function
+ * @file: User file
+ * @ptr: Requested pm_api_version string
+ * @len: Length of the userspace buffer
+ * @off: Offset within the file
+ *
+ * Return: Length of the version string on success
+ * else error code
+ */
+static ssize_t zynqmp_pm_debugfs_api_read(struct file *file, char __user *ptr,
+ size_t len, loff_t *off)
+{
+ return simple_read_from_buffer(ptr, len, off, debugfs_buf,
+ strlen(debugfs_buf));
+}
+
+/* Setup debugfs fops */
+static const struct file_operations fops_zynqmp_pm_dbgfs = {
+ .owner = THIS_MODULE,
+ .write = zynqmp_pm_debugfs_api_write,
+ .read = zynqmp_pm_debugfs_api_read,
+};
+
+/**
+ * zynqmp_pm_api_debugfs_init - Initialize debugfs interface
+ *
+ * Return: None
+ */
+void zynqmp_pm_api_debugfs_init(void)
+{
+ /* Initialize debugfs interface */
+ firmware_debugfs_root = debugfs_create_dir("zynqmp-firmware", NULL);
+ debugfs_create_file("pm", 0660, firmware_debugfs_root, NULL,
+ &fops_zynqmp_pm_dbgfs);
+}
+
+/**
+ * zynqmp_pm_api_debugfs_exit - Remove debugfs interface
+ *
+ * Return: None
+ */
+void zynqmp_pm_api_debugfs_exit(void)
+{
+ debugfs_remove_recursive(firmware_debugfs_root);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-debug.h b/drivers/firmware/xilinx/zynqmp-debug.h
new file mode 100644
index 000000000..9929f8b43
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-debug.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_DEBUG_H__
+#define __FIRMWARE_ZYNQMP_DEBUG_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE_DEBUG)
+void zynqmp_pm_api_debugfs_init(void);
+void zynqmp_pm_api_debugfs_exit(void);
+#else
+static inline void zynqmp_pm_api_debugfs_init(void) { }
+static inline void zynqmp_pm_api_debugfs_exit(void) { }
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_DEBUG_H__ */
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
new file mode 100644
index 000000000..300ba2991
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -0,0 +1,1313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2021 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/hashtable.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include "zynqmp-debug.h"
+
+/* Max HashMap Order for PM API feature check (1<<7 = 128) */
+#define PM_API_FEATURE_CHECK_MAX_ORDER 7
+
+static bool feature_check_enabled;
+static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+
+/**
+ * struct pm_api_feature_data - PM API Feature data
+ * @pm_api_id: PM API Id, used as key to index into hashmap
+ * @feature_status: status of PM API feature: valid, invalid
+ * @hentry: hlist_node that hooks this entry into hashtable
+ */
+struct pm_api_feature_data {
+ u32 pm_api_id;
+ int feature_status;
+ struct hlist_node hentry;
+};
+
+static const struct mfd_cell firmware_devs[] = {
+ {
+ .name = "zynqmp_power_controller",
+ },
+};
+
+/**
+ * zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
+ * @ret_status: PMUFW return code
+ *
+ * Return: corresponding Linux error code
+ */
+static int zynqmp_pm_ret_code(u32 ret_status)
+{
+ switch (ret_status) {
+ case XST_PM_SUCCESS:
+ case XST_PM_DOUBLE_REQ:
+ return 0;
+ case XST_PM_NO_FEATURE:
+ return -ENOTSUPP;
+ case XST_PM_NO_ACCESS:
+ return -EACCES;
+ case XST_PM_ABORT_SUSPEND:
+ return -ECANCELED;
+ case XST_PM_MULT_USER:
+ return -EUSERS;
+ case XST_PM_INTERNAL:
+ case XST_PM_CONFLICT:
+ case XST_PM_INVALID_NODE:
+ default:
+ return -EINVAL;
+ }
+}
+
+static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ return -ENODEV;
+}
+
+/*
+ * PM function call wrapper
+ * Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
+ */
+static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
+
+/**
+ * do_fw_call_smc() - Call system-level platform management layer (SMC)
+ * @arg0: Argument 0 to SMC call
+ * @arg1: Argument 1 to SMC call
+ * @arg2: Argument 2 to SMC call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function via SMC call (no hypervisor present).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
+ if (ret_payload) {
+ ret_payload[0] = lower_32_bits(res.a0);
+ ret_payload[1] = upper_32_bits(res.a0);
+ ret_payload[2] = lower_32_bits(res.a1);
+ ret_payload[3] = upper_32_bits(res.a1);
+ }
+
+ return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+/**
+ * do_fw_call_hvc() - Call system-level platform management layer (HVC)
+ * @arg0: Argument 0 to HVC call
+ * @arg1: Argument 1 to HVC call
+ * @arg2: Argument 2 to HVC call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function via HVC
+ * HVC-based for communication through hypervisor
+ * (no direct communication with ATF).
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
+ u32 *ret_payload)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+
+ if (ret_payload) {
+ ret_payload[0] = lower_32_bits(res.a0);
+ ret_payload[1] = upper_32_bits(res.a0);
+ ret_payload[2] = lower_32_bits(res.a1);
+ ret_payload[3] = upper_32_bits(res.a1);
+ }
+
+ return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
+}
+
+/**
+ * zynqmp_pm_feature() - Check whether the given feature is supported or not
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u64 smc_arg[2];
+ struct pm_api_feature_data *feature_data;
+
+ if (!feature_check_enabled)
+ return 0;
+
+ /* Check for existing entry in hash table for given api */
+ hash_for_each_possible(pm_api_features_map, feature_data, hentry,
+ api_id) {
+ if (feature_data->pm_api_id == api_id)
+ return feature_data->feature_status;
+ }
+
+ /* Add new entry if not present */
+ feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
+ if (!feature_data)
+ return -ENOMEM;
+
+ feature_data->pm_api_id = api_id;
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ else
+ ret = ret_payload[1];
+
+ feature_data->feature_status = ret;
+ hash_add(pm_api_features_map, &feature_data->hentry, api_id);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
+ * caller function depending on the configuration
+ * @pm_api_id: Requested PM-API call
+ * @arg0: Argument 0 to requested PM-API call
+ * @arg1: Argument 1 to requested PM-API call
+ * @arg2: Argument 2 to requested PM-API call
+ * @arg3: Argument 3 to requested PM-API call
+ * @ret_payload: Returned value array
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following the SMC Calling Convention (SMCCC) for SMC64, the
+ * PM Function Identifier is
+ * PM_SIP_SVC + PM_API_ID =
+ * ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
+ * ((SMC_64) << FUNCID_CC_SHIFT) |
+ * ((SIP_START) << FUNCID_OEN_SHIFT) |
+ * ((PM_API_ID) & FUNCID_NUM_MASK))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PM_API_ID - Platform Management API ID.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 *ret_payload)
+{
+ /*
+ * The SIP service call Function Identifier is ORed into the PM API ID
+ * and must be passed in register x0.
+ */
+ u64 smc_arg[4];
+ int ret;
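+ /*
+ * Enable PM API feature checking only on Versal; bail out if the
+ * SoC is neither ZynqMP nor Versal.
+ */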
+
+ /* Check if feature is supported or not */
+ ret = zynqmp_pm_feature(pm_api_id);
+ if (ret < 0)
+ return ret;
+
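+ /*
+ * Pack the PM API ID and the four 32-bit arguments into three
+ * 64-bit SMC registers.
+ */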
+ smc_arg[0] = PM_SIP_SVC | pm_api_id;
+ smc_arg[1] = ((u64)arg1 << 32) | arg0;
+ smc_arg[2] = ((u64)arg3 << 32) | arg2;
+
+ return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
+}
+
+static u32 pm_api_version;
+static u32 pm_tz_version;
+
+/**
+ * zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_api_version(u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!version)
+ return -EINVAL;
+
+ /* Check if the PM API version has already been retrieved */
+ if (pm_api_version > 0) {
+ *version = pm_api_version;
+ return 0;
+ }
+ ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
+ *version = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
+
+/**
+ * zynqmp_pm_get_chipid - Get silicon ID registers
+ * @idcode: IDCODE register
+ * @version: version register
+ *
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
+ */
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!idcode || !version)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ *idcode = ret_payload[1];
+ *version = ret_payload[2];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
+
+/**
+ * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_trustzone_version(u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!version)
+ return -EINVAL;
+
+ /* Check if the PM trustzone version has already been retrieved */
+ if (pm_tz_version > 0) {
+ *version = pm_tz_version;
+ return 0;
+ }
+ ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
+ 0, 0, ret_payload);
+ *version = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * get_set_conduit_method() - Choose SMC or HVC based communication
+ * @np: Pointer to the device_node structure
+ *
+ * Use SMC or HVC-based functions to communicate with EL2/EL3.
+ *
+ * Return: Returns 0 on success or error code
+ */
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("%s missing \"method\" property\n", __func__);
+ return -ENXIO;
+ }
+
+ if (!strcmp("hvc", method)) {
+ do_fw_call = do_fw_call_hvc;
+ } else if (!strcmp("smc", method)) {
+ do_fw_call = do_fw_call_smc;
+ } else {
+ pr_warn("%s Invalid \"method\" property: %s\n",
+ __func__, method);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pm_query_data() - Get query data from firmware
+ * @qdata: Query data structure containing the query ID and arguments
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+{
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
+ qdata.arg2, qdata.arg3, out);
+
+ /*
+ * For clock name query, all bytes in SMC response are clock name
+ * characters and return code is always success. For invalid clocks,
+ * clock name bytes would be zeros.
+ */
+ return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
+
+/**
+ * zynqmp_pm_clock_enable() - Enable the clock for given id
+ * @clock_id: ID of the clock to be enabled
+ *
+ * This function is used by master to enable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
+
+/**
+ * zynqmp_pm_clock_disable() - Disable the clock for given id
+ * @clock_id: ID of the clock to be disabled
+ *
+ * This function is used by master to disable the clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
+
+/**
+ * zynqmp_pm_clock_getstate() - Get the clock state for given id
+ * @clock_id: ID of the clock to be queried
+ * @state: 1/0 (Enabled/Disabled)
+ *
+ * This function is used by master to get the state of clock
+ * including peripherals and PLL clocks.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
+ 0, 0, ret_payload);
+ *state = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
+
+/**
+ * zynqmp_pm_clock_setdivider() - Set the clock divider for given id
+ * @clock_id: ID of the clock
+ * @divider: divider value
+ *
+ * This function is used by master to set divider for any clock
+ * to achieve desired rate.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
+
+/**
+ * zynqmp_pm_clock_getdivider() - Get the clock divider for given id
+ * @clock_id: ID of the clock
+ * @divider: divider value
+ *
+ * This function is used by master to get divider values
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
+ 0, 0, ret_payload);
+ *divider = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
+
+/**
+ * zynqmp_pm_clock_setrate() - Set the clock rate for given id
+ * @clock_id: ID of the clock
+ * @rate: rate value in hz
+ *
+ * This function is used by master to set rate for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
+ lower_32_bits(rate),
+ upper_32_bits(rate),
+ 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
+
+/**
+ * zynqmp_pm_clock_getrate() - Get the clock rate for given id
+ * @clock_id: ID of the clock
+ * @rate: rate value in hz
+ *
+ * This function is used by master to get rate
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
+ 0, 0, ret_payload);
+ *rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
+
+/**
+ * zynqmp_pm_clock_setparent() - Set the clock parent for given id
+ * @clock_id: ID of the clock
+ * @parent_id: parent id
+ *
+ * This function is used by master to set parent for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
+ parent_id, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
+
+/**
+ * zynqmp_pm_clock_getparent() - Get the clock parent for given id
+ * @clock_id: ID of the clock
+ * @parent_id: parent id
+ *
+ * This function is used by master to get parent index
+ * for any clock.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
+ 0, 0, ret_payload);
+ *parent_id = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
+
+/**
+ * zynqmp_pm_set_pll_frac_mode() - PM API for setting PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets PLL mode
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
+
+/**
+ * zynqmp_pm_get_pll_frac_mode() - PM API for getting PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
+ *
+ * This function returns the current PLL mode.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets fraction data.
+ * It is valid for fraction mode only.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id: Node ID of the device
+ * @type: Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index: GGS register index
+ * @value: Register value to be written
+ *
+ * This function writes the given value to the GGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index: GGS register index
+ * @value: Where to store the read register value
+ *
+ * This function returns the GGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index: PGGS register index
+ * @value: Register value to be written
+ *
+ * This function writes the given value to the PGGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ * storage (pggs)
+ * @index: PGGS register index
+ * @value: Where to store the read register value
+ *
+ * This function returns the PGGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
+
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value: Status value to be written
+ *
+ * This function sets the healthy bit value to indicate the boot health
+ * status to firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
+ * @reset: Reset to be configured
+ * @assert_flag: Flag stating should reset be asserted (1) or
+ * released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+ 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
+
+/**
+ * zynqmp_pm_reset_get_status - Get status of the reset
+ * @reset: Reset whose status should be returned
+ * @status: Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+ 0, 0, ret_payload);
+ *status = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
+
+/**
+ * zynqmp_pm_fpga_load - Perform the fpga load
+ * @address: Address to write to
+ * @size: pl bitstream size
+ * @flags: Bitstream type
+ * -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to pmufw to transfer
+ * the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
+{
+ return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value: Value to read
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ * master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * about the completed power management initialization.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_init_finalize(void)
+{
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
+
+/**
+ * zynqmp_pm_set_suspend_mode() - Set system suspend mode
+ * @mode: Mode to set for system suspend
+ *
+ * This API function is used to set mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This function is used by master to request particular node from firmware.
+ * Every master must request node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+ qos, ack, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node: Node ID of the slave
+ *
+ * This function is used by master to inform firmware that master
+ * has released node. Once released, master must not use that node
+ * without re-request.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_release_node(const u32 node)
+{
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of a slave
+ * that the PU has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+ qos, ack, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
+
+/**
+ * zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
+
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart? 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
+};
+
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
+/**
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
+ *
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
+ */
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
+}
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
+
+static int zynqmp_firmware_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (!np)
+ return 0;
+
+ feature_check_enabled = true;
+ }
+ of_node_put(np);
+
+ ret = get_set_conduit_method(dev->of_node);
+ if (ret)
+ return ret;
+
+ /* Check PM API version number */
+ zynqmp_pm_get_api_version(&pm_api_version);
+ if (pm_api_version < ZYNQMP_PM_VERSION) {
+ panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
+ __func__,
+ ZYNQMP_PM_VERSION_MAJOR, ZYNQMP_PM_VERSION_MINOR,
+ pm_api_version >> 16, pm_api_version & 0xFFFF);
+ }
+
+ pr_info("%s Platform Management API v%d.%d\n", __func__,
+ pm_api_version >> 16, pm_api_version & 0xFFFF);
+
+ /* Check trustzone version number */
+ ret = zynqmp_pm_get_trustzone_version(&pm_tz_version);
+ if (ret)
+ panic("Legacy trustzone found without version support\n");
+
+ if (pm_tz_version < ZYNQMP_TZ_VERSION)
+ panic("%s Trustzone version error. Expected: v%d.%d - Found: v%d.%d\n",
+ __func__,
+ ZYNQMP_TZ_VERSION_MAJOR, ZYNQMP_TZ_VERSION_MINOR,
+ pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+ pr_info("%s Trustzone version v%d.%d\n", __func__,
+ pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+ ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ return ret;
+ }
+
+ zynqmp_pm_api_debugfs_init();
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static int zynqmp_firmware_remove(struct platform_device *pdev)
+{
+ struct pm_api_feature_data *feature_data;
+ struct hlist_node *tmp;
+ int i;
+
+ mfd_remove_devices(&pdev->dev);
+ zynqmp_pm_api_debugfs_exit();
+
+ hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
+ hash_del(&feature_data->hentry);
+ kfree(feature_data);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_firmware_of_match[] = {
+ {.compatible = "xlnx,zynqmp-firmware"},
+ {.compatible = "xlnx,versal-firmware"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
+
+static struct platform_driver zynqmp_firmware_driver = {
+ .driver = {
+ .name = "zynqmp_firmware",
+ .of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
+ },
+ .probe = zynqmp_firmware_probe,
+ .remove = zynqmp_firmware_remove,
+};
+module_platform_driver(zynqmp_firmware_driver);