author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/cpufreq
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- drivers/cpufreq/Kconfig | 325
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 359
-rw-r--r-- drivers/cpufreq/Kconfig.powerpc | 56
-rw-r--r-- drivers/cpufreq/Kconfig.x86 | 324
-rw-r--r-- drivers/cpufreq/Makefile | 116
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 1073
-rw-r--r-- drivers/cpufreq/amd-pstate-trace.c | 2
-rw-r--r-- drivers/cpufreq/amd-pstate-trace.h | 97
-rw-r--r-- drivers/cpufreq/amd-pstate-ut.c | 283
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 720
-rw-r--r-- drivers/cpufreq/amd_freq_sensitivity.c | 159
-rw-r--r-- drivers/cpufreq/armada-37xx-cpufreq.c | 564
-rw-r--r-- drivers/cpufreq/armada-8k-cpufreq.c | 215
-rw-r--r-- drivers/cpufreq/bmips-cpufreq.c | 189
-rw-r--r-- drivers/cpufreq/brcmstb-avs-cpufreq.c | 786
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 1014
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 213
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 372
-rw-r--r-- drivers/cpufreq/cpufreq-dt.h | 25
-rw-r--r-- drivers/cpufreq/cpufreq-nforce2.c | 438
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2953
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 344
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 579
-rw-r--r-- drivers/cpufreq/cpufreq_governor.h | 181
-rw-r--r-- drivers/cpufreq/cpufreq_governor_attr_set.c | 76
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 486
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.h | 26
-rw-r--r-- drivers/cpufreq/cpufreq_performance.c | 45
-rw-r--r-- drivers/cpufreq/cpufreq_powersave.c | 39
-rw-r--r-- drivers/cpufreq/cpufreq_stats.c | 290
-rw-r--r-- drivers/cpufreq/cpufreq_userspace.c | 142
-rw-r--r-- drivers/cpufreq/davinci-cpufreq.c | 158
-rw-r--r-- drivers/cpufreq/e_powersaver.c | 424
-rw-r--r-- drivers/cpufreq/elanfreq.c | 228
-rw-r--r-- drivers/cpufreq/freq_table.c | 370
-rw-r--r-- drivers/cpufreq/gx-suspmod.c | 498
-rw-r--r-- drivers/cpufreq/highbank-cpufreq.c | 113
-rw-r--r-- drivers/cpufreq/ia64-acpi-cpufreq.c | 353
-rw-r--r-- drivers/cpufreq/imx-cpufreq-dt.c | 197
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 549
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 3537
-rw-r--r-- drivers/cpufreq/kirkwood-cpufreq.c | 205
-rw-r--r-- drivers/cpufreq/longhaul.c | 1000
-rw-r--r-- drivers/cpufreq/longhaul.h | 352
-rw-r--r-- drivers/cpufreq/longrun.c | 317
-rw-r--r-- drivers/cpufreq/loongson1-cpufreq.c | 222
-rw-r--r-- drivers/cpufreq/loongson2_cpufreq.c | 184
-rw-r--r-- drivers/cpufreq/maple-cpufreq.c | 241
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq-hw.c | 336
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 807
-rw-r--r-- drivers/cpufreq/mvebu-cpufreq.c | 101
-rw-r--r-- drivers/cpufreq/omap-cpufreq.c | 200
-rw-r--r-- drivers/cpufreq/p4-clockmod.c | 273
-rw-r--r-- drivers/cpufreq/pasemi-cpufreq.c | 275
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 632
-rw-r--r-- drivers/cpufreq/pmac32-cpufreq.c | 693
-rw-r--r-- drivers/cpufreq/pmac64-cpufreq.c | 674
-rw-r--r-- drivers/cpufreq/powernow-k6.c | 311
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 696
-rw-r--r-- drivers/cpufreq/powernow-k7.h | 41
-rw-r--r-- drivers/cpufreq/powernow-k8.c | 1221
-rw-r--r-- drivers/cpufreq/powernow-k8.h | 188
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 1166
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq.c | 173
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq.h | 33
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c | 102
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 151
-rw-r--r-- drivers/cpufreq/pxa2xx-cpufreq.c | 321
-rw-r--r-- drivers/cpufreq/pxa3xx-cpufreq.c | 235
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-hw.c | 733
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-nvmem.c | 457
-rw-r--r-- drivers/cpufreq/qoriq-cpufreq.c | 310
-rw-r--r-- drivers/cpufreq/raspberrypi-cpufreq.c | 97
-rw-r--r-- drivers/cpufreq/s3c2410-cpufreq.c | 155
-rw-r--r-- drivers/cpufreq/s3c2412-cpufreq.c | 240
-rw-r--r-- drivers/cpufreq/s3c2416-cpufreq.c | 492
-rw-r--r-- drivers/cpufreq/s3c2440-cpufreq.c | 321
-rw-r--r-- drivers/cpufreq/s3c24xx-cpufreq-debugfs.c | 163
-rw-r--r-- drivers/cpufreq/s3c24xx-cpufreq.c | 648
-rw-r--r-- drivers/cpufreq/s3c64xx-cpufreq.c | 208
-rw-r--r-- drivers/cpufreq/s5pv210-cpufreq.c | 687
-rw-r--r-- drivers/cpufreq/sa1100-cpufreq.c | 206
-rw-r--r-- drivers/cpufreq/sa1110-cpufreq.c | 373
-rw-r--r-- drivers/cpufreq/sc520_freq.c | 138
-rw-r--r-- drivers/cpufreq/scmi-cpufreq.c | 350
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 230
-rw-r--r-- drivers/cpufreq/sh-cpufreq.c | 175
-rw-r--r-- drivers/cpufreq/sparc-us2e-cpufreq.c | 377
-rw-r--r-- drivers/cpufreq/sparc-us3-cpufreq.c | 225
-rw-r--r-- drivers/cpufreq/spear-cpufreq.c | 247
-rw-r--r-- drivers/cpufreq/speedstep-centrino.c | 561
-rw-r--r-- drivers/cpufreq/speedstep-ich.c | 386
-rw-r--r-- drivers/cpufreq/speedstep-lib.c | 479
-rw-r--r-- drivers/cpufreq/speedstep-lib.h | 48
-rw-r--r-- drivers/cpufreq/speedstep-smi.c | 393
-rw-r--r-- drivers/cpufreq/sti-cpufreq.c | 303
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 218
-rw-r--r-- drivers/cpufreq/tegra124-cpufreq.c | 224
-rw-r--r-- drivers/cpufreq/tegra186-cpufreq.c | 290
-rw-r--r-- drivers/cpufreq/tegra194-cpufreq.c | 607
-rw-r--r-- drivers/cpufreq/tegra20-cpufreq.c | 113
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 425
-rw-r--r-- drivers/cpufreq/vexpress-spc-cpufreq.c | 579
103 files changed, 41526 insertions, 0 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
new file mode 100644
index 000000000..2a84fc633
--- /dev/null
+++ b/drivers/cpufreq/Kconfig
@@ -0,0 +1,325 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "CPU Frequency scaling"
+
+config CPU_FREQ
+ bool "CPU Frequency scaling"
+ select SRCU
+ help
+ CPU Frequency scaling allows you to change the clock speed of
+ CPUs on the fly. This is a nice method to save power, because
+ the lower the CPU clock speed, the less power the CPU consumes.
+
+ Note that this driver doesn't automatically change the CPU
+ clock speed, you need to either enable a dynamic cpufreq governor
+ (see below) after boot, or use a userspace tool.
+
+ For details, take a look at
+ <file:Documentation/admin-guide/pm/cpufreq.rst>.
+
+ If in doubt, say N.
+
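(Illustrative aside, not part of the patch.) The help text above says a governor or a userspace tool must drive the scaling after boot; in practice both go through the per-policy sysfs attributes. A minimal C sketch, assuming the standard layout under /sys/devices/system/cpu/cpu0/cpufreq/ and using cpu0 purely as an example:

#include <stdio.h>

/* Read cpu0's current governor and frequency from the standard cpufreq
 * sysfs attributes. Stand-alone illustration only. */
int main(void)
{
	char buf[128];
	FILE *f;

	f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("governor: %s", buf);
		fclose(f);
	}

	f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("current frequency (kHz): %s", buf);
		fclose(f);
	}

	return 0;
}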
+if CPU_FREQ
+
+config CPU_FREQ_GOV_ATTR_SET
+ bool
+
+config CPU_FREQ_GOV_COMMON
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ bool
+
+config CPU_FREQ_STAT
+ bool "CPU frequency transition statistics"
+ help
+ Export CPU frequency statistics information through sysfs.
+
+ If in doubt, say N.
+
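(Illustrative aside, not part of the patch.) With CPU_FREQ_STAT enabled, each policy exports a stats/ directory; time_in_state holds "<frequency_kHz> <time>" pairs, with the time customarily reported in 10 ms units. A small parsing sketch under that assumption:

#include <stdio.h>

/* Print how long cpu0 has spent at each frequency. Each line of
 * time_in_state is "<freq_kHz> <time>"; the time unit is typically
 * 10 ms ticks. */
int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state", "r");
	unsigned long freq, ticks;

	if (!f)
		return 1;
	while (fscanf(f, "%lu %lu", &freq, &ticks) == 2)
		printf("%8lu kHz: %lu ticks\n", freq, ticks);
	fclose(f);
	return 0;
}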
+choice
+ prompt "Default CPUFreq governor"
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM
+ default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
+ default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+ help
+ This option sets which CPUFreq governor shall be loaded at
+ startup. If in doubt, use the default setting.
+
+config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+ bool "performance"
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'performance' as default. This sets
+ the frequency statically to the highest frequency supported by
+ the CPU.
+
+config CPU_FREQ_DEFAULT_GOV_POWERSAVE
+ bool "powersave"
+ select CPU_FREQ_GOV_POWERSAVE
+ help
+ Use the CPUFreq governor 'powersave' as default. This sets
+ the frequency statically to the lowest frequency supported by
+ the CPU.
+
+config CPU_FREQ_DEFAULT_GOV_USERSPACE
+ bool "userspace"
+ select CPU_FREQ_GOV_USERSPACE
+ help
+ Use the CPUFreq governor 'userspace' as default. This allows
+ you to set the CPU frequency manually, or lets a userspace
+ program set the CPU frequency dynamically, without having
+ to enable the userspace governor manually.
+
+config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+ bool "ondemand"
+ depends on !(X86_INTEL_PSTATE && SMP)
+ select CPU_FREQ_GOV_ONDEMAND
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'ondemand' as default. This allows
+ you to get a full dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the ondemand
+ governor. If unsure, have a look at the help section of the
+ driver. The fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ bool "conservative"
+ depends on !(X86_INTEL_PSTATE && SMP)
+ select CPU_FREQ_GOV_CONSERVATIVE
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'conservative' as default. This allows
+ you to get a full dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the conservative
+ governor. If unsure, have a look at the help section of the
+ driver. The fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+ bool "schedutil"
+ depends on SMP
+ select CPU_FREQ_GOV_SCHEDUTIL
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the 'schedutil' CPUFreq governor by default. If unsure,
+ have a look at the help section of that governor. The fallback
+ governor will be 'performance'.
+
+endchoice
+
+config CPU_FREQ_GOV_PERFORMANCE
+ tristate "'performance' governor"
+ help
+ This cpufreq governor sets the frequency statically to the
+ highest available CPU frequency.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_performance.
+
+ If in doubt, say Y.
+
+config CPU_FREQ_GOV_POWERSAVE
+ tristate "'powersave' governor"
+ help
+ This cpufreq governor sets the frequency statically to the
+ lowest available CPU frequency.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_powersave.
+
+ If in doubt, say Y.
+
+config CPU_FREQ_GOV_USERSPACE
+ tristate "'userspace' governor for userspace frequency scaling"
+ help
+ Enable this cpufreq governor when you either want to set the
+ CPU frequency manually or when a userspace program shall
+ be able to set the CPU dynamically, like on LART
+ <http://www.lartmaker.nl/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_userspace.
+
+ If in doubt, say Y.
+
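(Illustrative aside, not part of the patch.) With the 'userspace' governor selected, the target frequency is driven entirely from user space through scaling_setspeed. A hedged sketch, assuming root privileges and that the requested kHz value is one of scaling_available_frequencies:

#include <stdio.h>

/* Select the userspace governor on cpu0 and request a specific frequency.
 * Both writes need root; the kernel clamps the request to the policy limits. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(int argc, char **argv)
{
	const char *khz = argc > 1 ? argv[1] : "800000"; /* example value */

	if (write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "userspace"))
		return 1;
	return write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed", khz) ? 1 : 0;
}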
+config CPU_FREQ_GOV_ONDEMAND
+ tristate "'ondemand' cpufreq policy governor"
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'ondemand' - This driver adds a dynamic cpufreq policy governor.
+ The governor polls periodically and changes the frequency based
+ on CPU utilization.
+ Support for this governor depends on the CPU's ability to do
+ fast frequency switching (i.e., very low latency frequency
+ transitions).
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_ondemand.
+
+ For details, take a look at
+ <file:Documentation/admin-guide/pm/cpufreq.rst>.
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_CONSERVATIVE
+ tristate "'conservative' cpufreq governor"
+ depends on CPU_FREQ
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'conservative' - this driver is rather similar to the 'ondemand'
+ governor in both its source code and its purpose; the difference is
+ that it is optimised for better suitability in a battery-powered
+ environment. The frequency is gracefully increased and decreased
+ rather than jumping to 100% when speed is required.
+
+ If you have a desktop machine then you should really be considering
+ the 'ondemand' governor instead; however, if you are using a laptop,
+ PDA or even an AMD64-based computer (due to the unacceptable
+ step-by-step latency issues between the minimum and maximum frequency
+ transitions in the CPU), you will probably want to use this governor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_conservative.
+
+ For details, take a look at
+ <file:Documentation/admin-guide/pm/cpufreq.rst>.
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_SCHEDUTIL
+ bool "'schedutil' cpufreq policy governor"
+ depends on CPU_FREQ && SMP
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ help
+ This governor makes decisions based on the utilization data provided
+ by the scheduler. It sets the CPU frequency to be proportional to
+ the utilization/capacity ratio coming from the scheduler. If the
+ utilization is frequency-invariant, the new frequency is also
+ proportional to the maximum available frequency. If that is not the
+ case, it is proportional to the current frequency of the CPU. The
+ frequency tipping point is at utilization/capacity equal to 80% in
+ both cases.
+
+ If in doubt, say N.
+
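(Illustrative aside, not part of the patch.) The 80% tipping point described above corresponds to requesting roughly 1.25 * ref_freq * util / max, where ref_freq is the maximum frequency if utilization is frequency-invariant and the current frequency otherwise. A sketch of that mapping (it mirrors the idea, not the kernel's exact implementation):

#include <stdio.h>

/* Request 1.25 * ref_freq * util / max in integer arithmetic, so the
 * requested frequency reaches ref_freq once util/max hits 80%. */
static unsigned long next_freq(unsigned long ref_freq, unsigned long util,
			       unsigned long max)
{
	return (ref_freq + (ref_freq >> 2)) * util / max;
}

int main(void)
{
	/* Example: 2 GHz reference frequency, utilization 400 of capacity 1024. */
	printf("%lu kHz\n", next_freq(2000000, 400, 1024));
	return 0;
}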
+comment "CPU frequency scaling drivers"
+
+config CPUFREQ_DT
+ tristate "Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+ This adds a generic DT based cpufreq driver for frequency management.
+ It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+ systems.
+
+ If in doubt, say N.
+
+config CPUFREQ_DT_PLATDEV
+ bool
+ help
+ This adds a generic DT based cpufreq platdev driver for frequency
+ management. This creates a 'cpufreq-dt' platform device, on the
+ supported platforms.
+
+ If in doubt, say N.
+
+if X86
+source "drivers/cpufreq/Kconfig.x86"
+endif
+
+if ARM || ARM64
+source "drivers/cpufreq/Kconfig.arm"
+endif
+
+if PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endif
+
+if IA64
+config IA64_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+
+ If in doubt, say N.
+endif
+
+if MIPS
+config BMIPS_CPUFREQ
+ tristate "BMIPS CPUfreq Driver"
+ help
+ This option adds a CPUfreq driver for BMIPS processors with
+ support for configurable CPU frequency.
+
+ For now, BMIPS5 chips are supported (such as the Broadcom 7425).
+
+ If in doubt, say N.
+
+config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
+ depends on LEMOTE_MACH2F
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software-configurable CPU frequency.
+
+ Loongson2F and its successors support this feature.
+
+ If in doubt, say N.
+
+config LOONGSON1_CPUFREQ
+ tristate "Loongson1 CPUFreq Driver"
+ depends on LOONGSON1_LS1B
+ help
+ This option adds a CPUFreq driver for Loongson1 processors which
+ support software-configurable CPU frequency.
+
+ If in doubt, say N.
+endif
+
+if SPARC64
+config SPARC_US3_CPUFREQ
+ tristate "UltraSPARC-III CPU Frequency driver"
+ help
+ This adds the CPUFreq driver for UltraSPARC-III processors.
+
+ If in doubt, say N.
+
+config SPARC_US2E_CPUFREQ
+ tristate "UltraSPARC-IIe CPU Frequency driver"
+ help
+ This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+ If in doubt, say N.
+endif
+
+if SUPERH
+config SH_CPU_FREQ
+ tristate "SuperH CPU Frequency driver"
+ help
+ This adds the cpufreq driver for SuperH. Any CPU that supports
+ clock rate rounding through the clock framework can use this
+ driver. While it will make the kernel slightly larger, this is
+ harmless for CPUs that don't support rate rounding. The driver
+ will also generate a notice in the boot log before disabling
+ itself if the CPU in question is not capable of rate rounding.
+
+ If unsure, say N.
+endif
+
+config QORIQ_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
+ depends on OF && COMMON_CLK
+ depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ select CLK_QORIQ
+ help
+ This adds the CPUFreq driver support for Freescale QorIQ SoCs
+ which are capable of changing the CPU's frequency dynamically.
+
+endif
+endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
new file mode 100644
index 000000000..82e5de1f6
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.arm
@@ -0,0 +1,359 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ARM CPU Frequency scaling drivers
+#
+
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
+config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
+ tristate "Allwinner nvmem based SUN50I CPUFreq driver"
+ depends on ARCH_SUNXI
+ depends on NVMEM_SUNXI_SID
+ select PM_OPP
+ help
+ This adds the nvmem-based CPUFreq driver for the Allwinner
+ H6 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sun50i-cpufreq-nvmem.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU && CPUFREQ_DT
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports 4 frequency and VDD levels.
+
+config ARM_ARMADA_8K_CPUFREQ
+ tristate "Armada 8K CPUFreq driver"
+ depends on ARCH_MVEBU && CPUFREQ_DT
+ select ARMADA_AP_CPU_CLK
+ help
+ This enables the CPUFreq driver support for Marvell
+ Armada 8K SoCs.
+ The Armada 8K has the AP806, which supports scaling
+ to any full integer divider.
+
+ If in doubt, say N.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM platforms using SCPI
+ protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on ARCH_VEXPRESS_SPC
+ select PM_OPP
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
+config ARM_BRCMSTB_AVS_CPUFREQ
+ tristate "Broadcom STB AVS CPUfreq driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default y
+ help
+ Some Broadcom STB SoCs use a co-processor running proprietary firmware
+ ("AVS") to handle voltage and frequency scaling. This driver provides
+ a standard CPUfreq interface to the firmware.
+
+ Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
+ depends on REGULATOR_ANATOP
+ depends on NVMEM_IMX_OCOTP || COMPILE_TEST
+ select PM_OPP
+ help
+ This adds cpufreq driver support for Freescale i.MX6 series SoCs.
+
+ If in doubt, say N.
+
+config ARM_IMX_CPUFREQ_DT
+ tristate "Freescale i.MX8M cpufreq support"
+ depends on ARCH_MXC && CPUFREQ_DT
+ help
+ This adds cpufreq driver support for Freescale i.MX8M series SoCs,
+ based on cpufreq-dt.
+
+ If in doubt, say N.
+
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool MACH_KIRKWOOD
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
+config ARM_MEDIATEK_CPUFREQ
+ tristate "CPU Frequency scaling support for MediaTek SoCs"
+ depends on ARCH_MEDIATEK && REGULATOR
+ select PM_OPP
+ help
+ This adds the CPUFreq driver support for MediaTek SoCs.
+
+config ARM_MEDIATEK_CPUFREQ_HW
+ tristate "MediaTek CPUFreq HW driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ default m
+ help
+ Support for the CPUFreq HW driver.
+ Some MediaTek chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
+config ARM_OMAP2PLUS_CPUFREQ
+ bool "TI OMAP2+"
+ depends on ARCH_OMAP2PLUS
+ default ARCH_OMAP2PLUS
+
+config ARM_QCOM_CPUFREQ_NVMEM
+ tristate "Qualcomm nvmem based CPUFreq"
+ depends on ARCH_QCOM
+ depends on NVMEM_QCOM_QFPROM
+ depends on QCOM_SMEM
+ select PM_OPP
+ help
+ This adds the CPUFreq driver for Qualcomm Kryo SoC based boards.
+
+ If in doubt, say N.
+
+config ARM_QCOM_CPUFREQ_HW
+ tristate "QCOM CPUFreq HW driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Support for the CPUFreq HW driver.
+ Some QCOM chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
+config ARM_RASPBERRYPI_CPUFREQ
+ tristate "Raspberry Pi cpufreq support"
+ depends on CLK_RASPBERRYPI || COMPILE_TEST
+ help
+ This adds the CPUFreq driver for Raspberry Pi
+
+ If in doubt, say N.
+
+config ARM_S3C_CPUFREQ
+ bool
+ help
+ Internal configuration node for common cpufreq on Samsung SoCs.
+
+config ARM_S3C24XX_CPUFREQ
+ bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
+ depends on ARCH_S3C24XX
+ select ARM_S3C_CPUFREQ
+ help
+ This enables the CPUfreq driver for the Samsung S3C24XX family
+ of CPUs.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+config ARM_S3C24XX_CPUFREQ_DEBUG
+ bool "Debug CPUfreq Samsung driver core"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_IODEBUG
+ bool "Debug CPUfreq Samsung driver IO timing"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_DEBUGFS
+ bool "Export debugfs for CPUFreq"
+ depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
+ help
+ Export status information via debugfs.
+
+config ARM_S3C2410_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
+ help
+ CPU Frequency scaling support for S3C2410
+
+config ARM_S3C2412_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
+ default y
+ select S3C2412_IOTIMING
+ help
+ CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
+
+config ARM_S3C2416_CPUFREQ
+ bool "S3C2416 CPU Frequency scaling support"
+ depends on CPU_S3C2416
+ help
+ This adds the CPUFreq driver for the Samsung S3C2416 and
+ S3C2450 SoC. The S3C2416 supports changing the rate of the
+ armdiv clock source and also entering a so called dynamic
+ voltage scaling mode in which it is possible to reduce the
+ core voltage of the CPU.
+
+ If in doubt, say N.
+
+config ARM_S3C2416_CPUFREQ_VCORESCALE
+ bool "Allow voltage scaling for S3C2416 arm core"
+ depends on ARM_S3C2416_CPUFREQ && REGULATOR
+ help
+ Enable CPU voltage scaling when entering the dvs mode.
+ It uses information gathered through existing hardware and
+ tests but not documented in any datasheet.
+
+ If in doubt, say N.
+
+config ARM_S3C2440_CPUFREQ
+ bool "S3C2440/S3C2442 CPU Frequency scaling support"
+ depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
+ default y
+ help
+ CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
+
+config ARM_S3C64XX_CPUFREQ
+ bool "Samsung S3C64XX"
+ depends on CPU_S3C6410
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S3C6410 SoC.
+
+ If in doubt, say N.
+
+config ARM_S5PV210_CPUFREQ
+ bool "Samsung S5PV210 and S5PC110"
+ depends on CPU_S5PV210
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S5PV210 and
+ S5PC110 SoCs.
+
+ If in doubt, say N.
+
+config ARM_SA1100_CPUFREQ
+ bool
+
+config ARM_SA1110_CPUFREQ
+ bool
+
+config ARM_SCMI_CPUFREQ
+ tristate "SCMI based CPUfreq driver"
+ depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
+ select PM_OPP
+ help
+ This adds the CPUfreq driver support for ARM platforms using SCMI
+ protocol for CPU power management.
+
+ This driver uses SCMI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_SPEAR_CPUFREQ
+ bool "SPEAr CPUFreq support"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_STI_CPUFREQ
+ tristate "STi CPUFreq support"
+ depends on CPUFREQ_DT && SOC_STIH407
+ help
+ This driver uses the generic OPP framework to match the running
+ platform with a predefined set of suitable values. If not provided
+ we will fall back to safe values contained in the Device Tree. Enable
+ this config option if you wish to add CPUFreq support for STi based
+ SoCs.
+
+config ARM_TEGRA20_CPUFREQ
+ tristate "Tegra20/30 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
+ default y
+ help
+ This adds the CPUFreq driver support for Tegra20/30 SOCs.
+
+config ARM_TEGRA124_CPUFREQ
+ bool "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
+ default y
+ help
+ This adds the CPUFreq driver support for Tegra124 SOCs.
+
+config ARM_TEGRA186_CPUFREQ
+ tristate "Tegra186 CPUFreq support"
+ depends on ARCH_TEGRA && TEGRA_BPMP
+ help
+ This adds the CPUFreq driver support for Tegra186 SOCs.
+
+config ARM_TEGRA194_CPUFREQ
+ tristate "Tegra194 CPUFreq support"
+ depends on ARCH_TEGRA_194_SOC && TEGRA_BPMP
+ default y
+ help
+ This adds CPU frequency driver support for Tegra194 SOCs.
+
+config ARM_TI_CPUFREQ
+ bool "Texas Instruments CPUFreq support"
+ depends on ARCH_OMAP2PLUS
+ default ARCH_OMAP2PLUS
+ help
+ This driver enables valid OPPs on the running platform based on
+ values contained within the SoC in use. Enable this in order to
+ use the cpufreq-dt driver on all Texas Instruments platforms that
+ provide dt based operating-points-v2 tables with opp-supported-hw
+ data provided. Required for cpufreq support on AM335x, AM437x,
+ DRA7x, and AM57x platforms.
+
+config ARM_PXA2xx_CPUFREQ
+ tristate "Intel PXA2xx CPUfreq driver"
+ depends on PXA27x || PXA25x
+ help
+ This adds the CPUFreq driver support for Intel PXA2xx SoCs.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
new file mode 100644
index 000000000..58151ca56
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CPU_FREQ_CBE
+ tristate "CBE frequency scaling"
+ depends on CBE_RAS && PPC_CELL
+ default m
+ help
+ This adds the cpufreq driver for Cell BE processors.
+ For details, take a look at <file:Documentation/cpu-freq/>.
+ If you don't have such a processor, say N.
+
+config CPU_FREQ_CBE_PMI
+ bool "CBE frequency scaling using PMI interface"
+ depends on CPU_FREQ_CBE
+ default n
+ help
+ Select this, if you want to use the PMI interface to switch
+ frequencies. Using PMI, the processor will not only be able to run at
+ lower speed, but also at lower core voltage.
+
+config CPU_FREQ_MAPLE
+ bool "Support for Maple 970FX Evaluation Board"
+ depends on PPC_MAPLE
+ help
+ This adds support for frequency switching on Maple 970FX
+ Evaluation Board and compatible boards (IBM JS2x blades).
+
+config CPU_FREQ_PMAC
+ bool "Support for Apple PowerBooks"
+ depends on ADB_PMU && PPC32
+ help
+ This adds support for frequency switching on Apple PowerBooks;
+ this currently includes some models of iBook & Titanium
+ PowerBook.
+
+config CPU_FREQ_PMAC64
+ bool "Support for some Apple G5s"
+ depends on PPC_PMAC && PPC64
+ help
+ This adds support for frequency switching on Apple iMac G5,
+ and some of the more recent desktop G5 machines as well.
+
+config PPC_PASEMI_CPUFREQ
+ bool "Support for PA Semi PWRficient"
+ depends on PPC_PASEMI
+ default y
+ help
+ This adds the support for frequency switching on PA Semi
+ PWRficient processors.
+
+config POWERNV_CPUFREQ
+ tristate "CPU frequency scaling for IBM POWERNV platform"
+ depends on PPC_POWERNV
+ default y
+ help
+ This adds support for CPU frequency switching on the IBM POWERNV
+ platform.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
new file mode 100644
index 000000000..00476e94d
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.x86
@@ -0,0 +1,324 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# x86 CPU Frequency scaling drivers
+#
+
+config X86_INTEL_PSTATE
+ bool "Intel P state control"
+ depends on X86
+ select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+ select CPU_FREQ_GOV_PERFORMANCE
+ select CPU_FREQ_GOV_SCHEDUTIL if SMP
+ help
+ This driver provides P-state control for Intel Core processors.
+ The driver implements an internal governor and, when enabled,
+ becomes the preferred scaling driver and governor for Sandy
+ Bridge processors.
+
+ If in doubt, say N.
+
+config X86_PCC_CPUFREQ
+ tristate "Processor Clocking Control interface driver"
+ depends on ACPI && ACPI_PROCESSOR
+ help
+ This driver adds support for the PCC interface.
+
+ For details, take a look at:
+ <file:Documentation/admin-guide/pm/cpufreq_drivers.rst>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcc-cpufreq.
+
+ If in doubt, say N.
+
+config X86_AMD_PSTATE
+ bool "AMD Processor P-State driver"
+ depends on X86 && ACPI
+ select ACPI_PROCESSOR
+ select ACPI_CPPC_LIB if X86_64
+ select CPU_FREQ_GOV_SCHEDUTIL if SMP
+ help
+ This driver adds a CPUFreq driver which utilizes a fine grain
+ processor performance frequency control range instead of legacy
+ performance levels. _CPC needs to be present in the ACPI tables
+ of the system.
+
+ For details, take a look at:
+ <file:Documentation/admin-guide/pm/amd-pstate.rst>.
+
+ If in doubt, say N.
+
+config X86_AMD_PSTATE_UT
+ tristate "selftest for AMD Processor P-State driver"
+ depends on X86 && ACPI_PROCESSOR
+ default n
+ help
+ This kernel module is used for testing. It's safe to say M here.
+
+ It can also be built in without X86_AMD_PSTATE enabled.
+ Currently, only tests for amd-pstate are supported. If X86_AMD_PSTATE
+ is disabled, the module reports that the tests can only run with the
+ amd-pstate driver and that X86_AMD_PSTATE should be enabled.
+ In the future, comparison tests will be added: amd-pstate can be
+ disabled and acpi-cpufreq enabled to run the test cases, and the
+ results then compared.
+
+config X86_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+ This driver also supports Intel Enhanced Speedstep and newer
+ AMD CPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acpi-cpufreq.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_ACPI_CPUFREQ_CPB
+ default y
+ bool "Legacy cpb sysfs knob support for AMD CPUs"
+ depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ help
+ The powernow-k8 driver used to provide a sysfs knob called "cpb"
+ to disable the Core Performance Boosting feature of AMD CPUs. This
+ file has now been superseded by the more generic "boost" entry.
+
+ By enabling this option the acpi_cpufreq driver provides the old
+ entry in addition to the new boost ones, for compatibility reasons.
+
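(Illustrative aside, not part of the patch.) The generic "boost" entry mentioned above is the system-wide /sys/devices/system/cpu/cpufreq/boost attribute; the legacy per-policy "cpb" file toggles the same feature. A sketch for flipping it, assuming root:

#include <stdio.h>

/* Enable or disable frequency boosting through the generic cpufreq knob.
 * Writing "1" enables boost, "0" disables it; requires root. */
static int set_boost(int enable)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpufreq/boost", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	fclose(f);
	return 0;
}

int main(void)
{
	return set_boost(1) ? 1 : 0;
}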
+config ELAN_CPUFREQ
+ tristate "AMD Elan SC400 and SC410"
+ depends on MELAN
+ help
+ This adds the CPUFreq driver for AMD Elan SC400 and SC410
+ processors.
+
+ You need to specify the processor maximum speed as boot
+ parameter: elanfreq=maxspeed (in kHz) or as module
+ parameter "max_freq".
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config SC520_CPUFREQ
+ tristate "AMD Elan SC520"
+ depends on MELAN
+ help
+ This adds the CPUFreq driver for AMD Elan SC520 processor.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+
+config X86_POWERNOW_K6
+ tristate "AMD Mobile K6-2/K6-3 PowerNow!"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
+ AMD K6-3+ processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_POWERNOW_K7
+ tristate "AMD Mobile Athlon/Duron PowerNow!"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for mobile AMD K7 mobile processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_POWERNOW_K7_ACPI
+ bool
+ depends on X86_POWERNOW_K7 && ACPI_PROCESSOR
+ depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m)
+ depends on X86_32
+ default y
+
+config X86_POWERNOW_K8
+ tristate "AMD Opteron/Athlon64 PowerNow!"
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
+ help
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+ Support for K10 and newer processors is now in acpi-cpufreq.
+
+ To compile this driver as a module, choose M here: the
+ module will be called powernow-k8.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+config X86_AMD_FREQ_SENSITIVITY
+ tristate "AMD frequency sensitivity feedback powersave bias"
+ depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ help
+ This adds AMD-specific powersave bias function to the ondemand
+ governor, which allows it to make more power-conscious frequency
+ change decisions based on feedback from hardware (available on AMD
+ Family 16h and above).
+
+ Hardware feedback tells software how "sensitive" to frequency changes
+ the CPUs' workloads are. CPU-bound workloads will be more sensitive
+ -- they will perform better as frequency increases. Memory/IO-bound
+ workloads will be less sensitive -- they will not necessarily perform
+ better as frequency increases.
+
+ If in doubt, say N.
+
+config X86_GX_SUSPMOD
+ tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on X86_32 && PCI
+ help
+ This adds the CPUFreq driver for NatSemi Geode processors which
+ support suspend modulation.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_CENTRINO
+ tristate "Intel Enhanced SpeedStep (deprecated)"
+ select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
+ depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
+ help
+ This is deprecated and this functionality is now merged into
+ acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
+ speedstep_centrino.
+ This adds the CPUFreq driver for Enhanced SpeedStep enabled
+ mobile CPUs. This means Intel Pentium M (Centrino) CPUs
+ or 64bit enabled Intel Xeons.
+
+ To compile this driver as a module, choose M here: the
+ module will be called speedstep-centrino.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_CENTRINO_TABLE
+ bool "Built-in tables for Banias CPUs"
+ depends on X86_32 && X86_SPEEDSTEP_CENTRINO
+ default y
+ help
+ Use built-in tables for Banias CPUs if ACPI encoding
+ is not available.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_ICH
+ tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for certain mobile Intel Pentium III
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
+ mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
+ ICH3 or ICH4 southbridge.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_SMI
+ tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for certain mobile Intel Pentium III
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin)
+ on systems which have an Intel 440BX/ZX/MX southbridge.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_P4_CLOCKMOD
+ tristate "Intel Pentium 4 clock modulation"
+ help
+ This adds the CPUFreq driver for Intel Pentium 4 / XEON
+ processors. When enabled it will lower CPU temperature by skipping
+ clocks.
+
+ This driver should only be used in exceptional circumstances, when
+ very low power is needed, because it causes severe slowdowns and
+ noticeable latencies. Normally Speedstep should be used instead.
+
+ To compile this driver as a module, choose M here: the
+ module will be called p4-clockmod.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ Unless you are absolutely sure say N.
+
+config X86_CPUFREQ_NFORCE2
+ tristate "nVidia nForce2 FSB changing"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for FSB changing on nVidia nForce2
+ platforms.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_LONGRUN
+ tristate "Transmeta LongRun"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors
+ which support LongRun.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_LONGHAUL
+ tristate "VIA Cyrix III Longhaul"
+ depends on X86_32 && ACPI_PROCESSOR
+ help
+ This adds the CPUFreq driver for VIA Samuel/CyrixIII,
+ VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
+ processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_E_POWERSAVER
+ tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
+ depends on X86_32 && ACPI_PROCESSOR
+ help
+ This adds the CPUFreq driver for VIA C7 processors. However, this driver
+ does not have any safeguards to prevent operating the CPU out of spec
+ and is thus considered dangerous. Please use the regular ACPI cpufreq
+ driver, enabled by CONFIG_X86_ACPI_CPUFREQ.
+
+ If in doubt, say N.
+
+comment "shared options"
+
+config X86_SPEEDSTEP_LIB
+ tristate
+ default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
+
+config X86_SPEEDSTEP_RELAXED_CAP_CHECK
+ bool "Relaxed speedstep capability checks"
+ depends on X86_32 && (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
+ help
+ Don't perform all checks for a speedstep capable system which would
+ normally be done. Some ancient or strange systems, though speedstep
+ capable, don't always indicate that they are speedstep capable. This
+ option lets the probing code bypass some of those checks if the
+ parameter "relaxed_check=1" is passed to the module.
+
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
new file mode 100644
index 000000000..49b98c62c
--- /dev/null
+++ b/drivers/cpufreq/Makefile
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: GPL-2.0
+# CPUfreq core
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
+
+# CPUfreq stats
+obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
+
+# CPUfreq governors
+obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
+obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
+obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
+
+obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
+
+# Traces
+CFLAGS_amd-pstate-trace.o := -I$(src)
+amd_pstate-y := amd-pstate.o amd-pstate-trace.o
+
+##################################################################################
+# x86 drivers.
+# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
+# K8 systems. This is still the case but acpi-cpufreq errors out so that
+# powernow-k8 can then load. ACPI is preferred to all other hardware-specific drivers.
+# speedstep-* is preferred over p4-clockmod.
+
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_AMD_PSTATE) += amd_pstate.o
+obj-$(CONFIG_X86_AMD_PSTATE_UT) += amd-pstate-ut.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
+obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
+obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
+obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
+obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
+obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
+obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
+obj-$(CONFIG_X86_LONGRUN) += longrun.o
+obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
+obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
+obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
+obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
+obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
+obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
+obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
+obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
+
+##################################################################################
+# ARM SoC drivers
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
+obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
+obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARM_IMX_CPUFREQ_DT) += imx-cpufreq-dt.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ_HW) += mediatek-cpufreq-hw.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
+obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
+obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
+obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
+obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
+obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
+obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
+obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
+obj-$(CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM) += sun50i-cpufreq-nvmem.o
+obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA194_CPUFREQ) += tegra194-cpufreq.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
+
+
+##################################################################################
+# PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
+ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
+obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
+obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
+obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
+obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
+obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
+
+##################################################################################
+# Other platform drivers
+obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
+obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
+obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
+obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
+obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
+obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
new file mode 100644
index 000000000..1bb2b90eb
--- /dev/null
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * acpi-cpufreq.c - ACPI Processor P-States Driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+enum {
+ UNDEFINED_CAPABLE = 0,
+ SYSTEM_INTEL_MSR_CAPABLE,
+ SYSTEM_AMD_MSR_CAPABLE,
+ SYSTEM_IO_CAPABLE,
+};
+
+#define INTEL_MSR_RANGE (0xffff)
+#define AMD_MSR_RANGE (0x7)
+#define HYGON_MSR_RANGE (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
+
+struct acpi_cpufreq_data {
+ unsigned int resume;
+ unsigned int cpu_feature;
+ unsigned int acpi_perf_cpu;
+ cpumask_var_t freqdomain_cpus;
+ void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
+};
+
+/* acpi_perf_data is a pointer to percpu data. */
+static struct acpi_processor_performance __percpu *acpi_perf_data;
+
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+ return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+static unsigned int acpi_pstate_strict;
+
+static bool boost_state(unsigned int cpu)
+{
+ u32 lo, hi;
+ u64 msr;
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
+ rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+ msr = lo | ((u64)hi << 32);
+ return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+ case X86_VENDOR_HYGON:
+ case X86_VENDOR_AMD:
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ msr = lo | ((u64)hi << 32);
+ return !(msr & MSR_K7_HWCR_CPB_DIS);
+ }
+ return false;
+}
+
+static int boost_set_msr(bool enable)
+{
+ u32 msr_addr;
+ u64 msr_mask, val;
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_ZHAOXIN:
+ msr_addr = MSR_IA32_MISC_ENABLE;
+ msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+ break;
+ case X86_VENDOR_HYGON:
+ case X86_VENDOR_AMD:
+ msr_addr = MSR_K7_HWCR;
+ msr_mask = MSR_K7_HWCR_CPB_DIS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rdmsrl(msr_addr, val);
+
+ if (enable)
+ val &= ~msr_mask;
+ else
+ val |= msr_mask;
+
+ wrmsrl(msr_addr, val);
+ return 0;
+}
+
+static void boost_set_msr_each(void *p_en)
+{
+ bool enable = (bool) p_en;
+
+ boost_set_msr(enable);
+}
+
+static int set_boost(struct cpufreq_policy *policy, int val)
+{
+ on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+ (void *)(long)val, 1);
+ pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+ cpumask_pr_args(policy->cpus), val ? "en" : "dis");
+
+ return 0;
+}
+
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
+ if (unlikely(!data))
+ return -ENODEV;
+
+ return cpufreq_show_cpus(data->freqdomain_cpus, buf);
+}
+
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ int ret;
+ unsigned int val = 0;
+
+ if (!acpi_cpufreq_driver.set_boost)
+ return -EINVAL;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret || val > 1)
+ return -EINVAL;
+
+ cpus_read_lock();
+ set_boost(policy, val);
+ cpus_read_unlock();
+
+ return count;
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
+}
+
+cpufreq_freq_attr_rw(cpb);
+#endif
+
+static int check_est_cpu(unsigned int cpuid)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+ return cpu_has(cpu, X86_FEATURE_EST);
+}
+
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+ return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
+static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct acpi_processor_performance *perf;
+ int i;
+
+ perf = to_perf_data(data);
+
+ for (i = 0; i < perf->state_count; i++) {
+ if (value == perf->states[i].status)
+ return policy->freq_table[i].frequency;
+ }
+ return 0;
+}
+
+static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct cpufreq_frequency_table *pos;
+ struct acpi_processor_performance *perf;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ msr &= AMD_MSR_RANGE;
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ msr &= HYGON_MSR_RANGE;
+ else
+ msr &= INTEL_MSR_RANGE;
+
+ perf = to_perf_data(data);
+
+ cpufreq_for_each_entry(pos, policy->freq_table)
+ if (msr == perf->states[pos->driver_data].status)
+ return pos->frequency;
+ return policy->freq_table[0].frequency;
+}
+
+static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
+ switch (data->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ case SYSTEM_AMD_MSR_CAPABLE:
+ return extract_msr(policy, val);
+ case SYSTEM_IO_CAPABLE:
+ return extract_io(policy, val);
+ default:
+ return 0;
+ }
+}
+
+static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy __always_unused;
+
+ rdmsr(MSR_IA32_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+{
+ u32 lo, hi;
+
+ rdmsr(MSR_IA32_PERF_CTL, lo, hi);
+ lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
+ wrmsr(MSR_IA32_PERF_CTL, lo, hi);
+}
+
+static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy __always_unused;
+
+ rdmsr(MSR_AMD_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+{
+ wrmsr(MSR_AMD_PERF_CTL, val, 0);
+}
+
+static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+{
+ u32 val;
+
+ acpi_os_read_port(reg->address, &val, reg->bit_width);
+ return val;
+}
+
+static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+{
+ acpi_os_write_port(reg->address, val, reg->bit_width);
+}
+
+struct drv_cmd {
+ struct acpi_pct_register *reg;
+ u32 val;
+ union {
+ void (*write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*read)(struct acpi_pct_register *reg);
+ } func;
+};
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
+{
+ struct drv_cmd *cmd = _cmd;
+
+ cmd->val = cmd->func.read(cmd->reg);
+}
+
+static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
+{
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .func.read = data->cpu_freq_read,
+ };
+ int err;
+
+ err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
+ WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ return cmd.val;
+}
+
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
+{
+ struct drv_cmd *cmd = _cmd;
+
+ cmd->func.write(cmd->reg, cmd->val);
+}
+
+static void drv_write(struct acpi_cpufreq_data *data,
+ const struct cpumask *mask, u32 val)
+{
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .val = val,
+ .func.write = data->cpu_freq_write,
+ };
+ int this_cpu;
+
+ this_cpu = get_cpu();
+ if (cpumask_test_cpu(this_cpu, mask))
+ do_drv_write(&cmd);
+
+ smp_call_function_many(mask, do_drv_write, &cmd, 1);
+ put_cpu();
+}
+
+static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
+{
+ u32 val;
+
+ if (unlikely(cpumask_empty(mask)))
+ return 0;
+
+ val = drv_read(data, mask);
+
+ pr_debug("%s = %u\n", __func__, val);
+
+ return val;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+ struct acpi_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+ unsigned int cached_freq;
+
+ pr_debug("%s (%d)\n", __func__, cpu);
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ data = policy->driver_data;
+ if (unlikely(!data || !policy->freq_table))
+ return 0;
+
+ cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
+ if (freq != cached_freq) {
+ /*
+ * The dreaded BIOS frequency change behind our back.
+ * Force set the frequency on next target call.
+ */
+ data->resume = 1;
+ }
+
+ pr_debug("cur freq = %u\n", freq);
+
+ return freq;
+}
+
+static unsigned int check_freqs(struct cpufreq_policy *policy,
+ const struct cpumask *mask, unsigned int freq)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ unsigned int cur_freq;
+ unsigned int i;
+
+ for (i = 0; i < 100; i++) {
+ cur_freq = extract_freq(policy, get_cur_val(mask, data));
+ if (cur_freq == freq)
+ return 1;
+ udelay(10);
+ }
+ return 0;
+}
+
+static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct acpi_processor_performance *perf;
+ const struct cpumask *mask;
+ unsigned int next_perf_state = 0; /* Index into perf table */
+ int result = 0;
+
+ if (unlikely(!data)) {
+ return -ENODEV;
+ }
+
+ perf = to_perf_data(data);
+ next_perf_state = policy->freq_table[index].driver_data;
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n",
+ next_perf_state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n",
+ next_perf_state);
+ return 0;
+ }
+ }
+
+ /*
+ * The core won't allow CPUs to go away until the governor has been
+ * stopped, so we can rely on the stability of policy->cpus.
+ */
+ mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
+ cpumask_of(policy->cpu) : policy->cpus;
+
+ drv_write(data, mask, perf->states[next_perf_state].control);
+
+ if (acpi_pstate_strict) {
+ if (!check_freqs(policy, mask,
+ policy->freq_table[index].frequency)) {
+ pr_debug("%s (%d)\n", __func__, policy->cpu);
+ result = -EAGAIN;
+ }
+ }
+
+ if (!result)
+ perf->state = next_perf_state;
+
+ return result;
+}
+
+static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct acpi_processor_performance *perf;
+ struct cpufreq_frequency_table *entry;
+ unsigned int next_perf_state, next_freq, index;
+
+ /*
+ * Find the closest frequency above target_freq.
+ */
+ if (policy->cached_target_freq == target_freq)
+ index = policy->cached_resolved_idx;
+ else
+ index = cpufreq_table_find_index_dl(policy, target_freq,
+ false);
+
+ entry = &policy->freq_table[index];
+ next_freq = entry->frequency;
+ next_perf_state = entry->driver_data;
+
+ perf = to_perf_data(data);
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume))
+ data->resume = 0;
+ else
+ return next_freq;
+ }
+
+ data->cpu_freq_write(&perf->control_register,
+ perf->states[next_perf_state].control);
+ perf->state = next_perf_state;
+ return next_freq;
+}
+
+static unsigned long
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
+{
+ struct acpi_processor_performance *perf;
+
+ perf = to_perf_data(data);
+ if (cpu_khz) {
+ /* search the closest match to cpu_khz */
+ unsigned int i;
+ unsigned long freq;
+ unsigned long freqn = perf->states[0].core_frequency * 1000;
+
+ for (i = 0; i < (perf->state_count-1); i++) {
+ freq = freqn;
+ freqn = perf->states[i+1].core_frequency * 1000;
+ if ((2 * cpu_khz) > (freqn + freq)) {
+ perf->state = i;
+ return freq;
+ }
+ }
+ perf->state = perf->state_count-1;
+ return freqn;
+ } else {
+ /* assume CPU is at P0... */
+ perf->state = 0;
+ return perf->states[0].core_frequency * 1000;
+ }
+}
+
+static void free_acpi_perf_data(void)
+{
+ unsigned int i;
+
+ /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+ for_each_possible_cpu(i)
+ free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+ ->shared_cpu_map);
+ free_percpu(acpi_perf_data);
+}
+
+static int cpufreq_boost_online(unsigned int cpu)
+{
+ /*
+ * On the CPU_UP path we simply keep the boost-disable flag
+ * in sync with the current global state.
+ */
+ return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
+}
+
+static int cpufreq_boost_down_prep(unsigned int cpu)
+{
+ /*
+ * Clear the boost-disable bit on the CPU_DOWN path so that
+ * this cpu cannot block the remaining ones from boosting.
+ */
+ return boost_set_msr(1);
+}
+
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int __init acpi_cpufreq_early_init(void)
+{
+ unsigned int i;
+ pr_debug("%s\n", __func__);
+
+ acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+ if (!acpi_perf_data) {
+ pr_debug("Memory allocation error for acpi_perf_data.\n");
+ return -ENOMEM;
+ }
+ for_each_possible_cpu(i) {
+ if (!zalloc_cpumask_var_node(
+ &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+ GFP_KERNEL, cpu_to_node(i))) {
+
+ /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+ free_acpi_perf_data();
+ return -ENOMEM;
+ }
+ }
+
+ /* Do initialization in ACPI core */
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Some BIOSes do SW_ANY coordination internally, either setting it up in
+ * hardware or doing it in BIOS firmware, without informing the OS. If this
+ * goes undetected, the CPU may end up running at a different speed than the
+ * OS intended. Detect it and handle it cleanly.
+ */
+static int bios_with_sw_any_bug;
+
+static int sw_any_bug_found(const struct dmi_system_id *d)
+{
+ bios_with_sw_any_bug = 1;
+ return 0;
+}
+
+static const struct dmi_system_id sw_any_bug_dmi_table[] = {
+ {
+ .callback = sw_any_bug_found,
+ .ident = "Supermicro Server X6DLP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_BIOS_VERSION, "080010"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
+ },
+ },
+ { }
+};
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+ /* Intel Xeon Processor 7100 Series Specification Update
+ * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
+ * AL30: A Machine Check Exception (MCE) Occurring during an
+ * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+ * Both Processor Cores to Lock Up. */
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ if ((c->x86 == 15) &&
+ (c->x86_model == 6) &&
+ (c->x86_stepping == 8)) {
+ pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ int ret;
+
+ if (acpi_pstate_strict)
+ return 0;
+
+ ret = cppc_get_perf_caps(cpu, &perf_caps);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ highest_perf = amd_get_highest_perf();
+ else
+ highest_perf = perf_caps.highest_perf;
+
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ return 0;
+ }
+
+ if (highest_perf < nominal_perf) {
+ pr_debug("CPU%d: nominal performance above highest\n", cpu);
+ return 0;
+ }
+
+ return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct acpi_processor_performance *perf;
+ struct acpi_cpufreq_data *data;
+ unsigned int cpu = policy->cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
+ unsigned int result = 0;
+ u64 max_boost_ratio;
+ unsigned int i;
+#ifdef CONFIG_SMP
+ static int blacklisted;
+#endif
+
+ pr_debug("%s\n", __func__);
+
+#ifdef CONFIG_SMP
+ if (blacklisted)
+ return blacklisted;
+ blacklisted = acpi_cpufreq_blacklist(c);
+ if (blacklisted)
+ return blacklisted;
+#endif
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
+ result = -ENOMEM;
+ goto err_free;
+ }
+
+ perf = per_cpu_ptr(acpi_perf_data, cpu);
+ data->acpi_perf_cpu = cpu;
+ policy->driver_data = data;
+
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+ acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ result = acpi_processor_register_performance(perf, cpu);
+ if (result)
+ goto err_free_mask;
+
+ policy->shared_type = perf->shared_type;
+
+ /*
+ * Will let policy->cpus know about dependency only when software
+ * coordination is required.
+ */
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
+ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ cpumask_copy(policy->cpus, perf->shared_cpu_map);
+ }
+ cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
+
+#ifdef CONFIG_SMP
+ dmi_check_system(sw_any_bug_dmi_table);
+ if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
+ }
+
+ if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
+ !acpi_pstate_strict) {
+ cpumask_clear(policy->cpus);
+ cpumask_set_cpu(cpu, policy->cpus);
+ cpumask_copy(data->freqdomain_cpus,
+ topology_sibling_cpumask(cpu));
+ policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ pr_info_once("overriding BIOS provided _PSD data\n");
+ }
+#endif
+
+ /* capability check */
+ if (perf->state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if (perf->control_register.space_id != perf->status_register.space_id) {
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0xf) {
+ pr_debug("AMD K8 systems must use native drivers.\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+ pr_debug("SYSTEM IO addr space\n");
+ data->cpu_feature = SYSTEM_IO_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_io;
+ data->cpu_freq_write = cpu_freq_write_io;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ pr_debug("HARDWARE addr space\n");
+ if (check_est_cpu(cpu)) {
+ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_intel;
+ data->cpu_freq_write = cpu_freq_write_intel;
+ break;
+ }
+ if (check_amd_hwpstate_cpu(cpu)) {
+ data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_amd;
+ data->cpu_freq_write = cpu_freq_write_amd;
+ break;
+ }
+ result = -ENODEV;
+ goto err_unreg;
+ default:
+ pr_debug("Unknown addr space %d\n",
+ (u32) (perf->control_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
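+	/* (states[].transition_latency is in us; cpuinfo.transition_latency is in ns, hence * 1000) */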
+ policy->cpuinfo.transition_latency = 0;
+ for (i = 0; i < perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency =
+ perf->states[i].transition_latency * 1000;
+ }
+
+ /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
+ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
+ policy->cpuinfo.transition_latency > 20 * 1000) {
+ policy->cpuinfo.transition_latency = 20 * 1000;
+ pr_info_once("P-state transition latency capped at 20 uS\n");
+ }
+
+ /* table init */
+ for (i = 0; i < perf->state_count; i++) {
+ if (i > 0 && perf->states[i].core_frequency >=
+ freq_table[valid_states-1].frequency / 1000)
+ continue;
+
+ freq_table[valid_states].driver_data = i;
+ freq_table[valid_states].frequency =
+ perf->states[i].core_frequency * 1000;
+ valid_states++;
+ }
+ freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+ unsigned int freq = freq_table[0].frequency;
+
+ /*
+ * Because the loop above sorts the freq_table entries in the
+ * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+ * use it to set cpuinfo.max_freq.
+ */
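+		/*
+		 * Illustrative numbers only: with freq = 2000000 kHz and
+		 * max_boost_ratio = 1280 (highest_perf/nominal_perf = 1.25
+		 * scaled by SCHED_CAPACITY_SHIFT), cpuinfo.max_freq becomes
+		 * 2000000 * 1280 >> 10 = 2500000 kHz.
+		 */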
+ policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ } else {
+ /*
+ * If the maximum "boost" frequency is unknown, ask the arch
+ * scale-invariance code to use the "nominal" performance for
+ * CPU utilization scaling so as to prevent the schedutil
+ * governor from selecting inadequate CPU frequencies.
+ */
+ arch_set_max_freq_ratio(true);
+ }
+
+ policy->freq_table = freq_table;
+ perf->state = 0;
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ /*
+ * The core will not set policy->cur, because
+ * cpufreq_driver->get is NULL, so we need to set it here.
+ * However, we have to guess it, because the current speed is
+ * unknown and not detectable via IO ports.
+ */
+ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+ break;
+ default:
+ break;
+ }
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
+ for (i = 0; i < perf->state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
+ (i == perf->state ? '*' : ' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].power,
+ (u32) perf->states[i].transition_latency);
+
+ /*
+ * the first call to ->target() should result in us actually
+ * writing something to the appropriate registers.
+ */
+ data->resume = 1;
+
+ policy->fast_switch_possible = !acpi_pstate_strict &&
+ !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+
+ if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+
+ return result;
+
+err_unreg:
+ acpi_processor_unregister_performance(cpu);
+err_free_mask:
+ free_cpumask_var(data->freqdomain_cpus);
+err_free:
+ kfree(data);
+ policy->driver_data = NULL;
+
+ return result;
+}
+
+static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
+ pr_debug("%s\n", __func__);
+
+ policy->fast_switch_possible = false;
+ policy->driver_data = NULL;
+ acpi_processor_unregister_performance(data->acpi_perf_cpu);
+ free_cpumask_var(data->freqdomain_cpus);
+ kfree(policy->freq_table);
+ kfree(data);
+
+ return 0;
+}
+
+static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
+ pr_debug("%s\n", __func__);
+
+ data->resume = 1;
+
+ return 0;
+}
+
+static struct freq_attr *acpi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &freqdomain_cpus,
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+ &cpb,
+#endif
+ NULL,
+};
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
+ .fast_switch = acpi_cpufreq_fast_switch,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .resume = acpi_cpufreq_resume,
+ .name = "acpi-cpufreq",
+ .attr = acpi_cpufreq_attr,
+};
+
+static enum cpuhp_state acpi_cpufreq_online;
+
+static void __init acpi_cpufreq_boost_init(void)
+{
+ int ret;
+
+ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
+ pr_debug("Boost capabilities not present in the processor\n");
+ return;
+ }
+
+ acpi_cpufreq_driver.set_boost = set_boost;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
+
+ /*
+	 * This calls the online callback on all online CPUs and forces all
+ * MSRs to the same value.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
+ cpufreq_boost_online, cpufreq_boost_down_prep);
+ if (ret < 0) {
+ pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
+ return;
+ }
+ acpi_cpufreq_online = ret;
+}
+
+static void acpi_cpufreq_boost_exit(void)
+{
+ if (acpi_cpufreq_online > 0)
+ cpuhp_remove_state_nocalls(acpi_cpufreq_online);
+}
+
+static int __init acpi_cpufreq_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* don't keep reloading if cpufreq_driver exists */
+ if (cpufreq_get_current_driver())
+ return -EEXIST;
+
+ pr_debug("%s\n", __func__);
+
+ ret = acpi_cpufreq_early_init();
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	/* this is a sysfs file with a strange name and even stranger
+	 * semantics - per-CPU instantiation, but system-global effect.
+	 * Let's enable it only on AMD CPUs for compatibility reasons and
+	 * only if configured. This is considered legacy code, which
+	 * will probably be removed at some point in the future.
+ */
+ if (!check_amd_hwpstate_cpu(0)) {
+ struct freq_attr **attr;
+
+ pr_debug("CPB unsupported, do not expose it\n");
+
+ for (attr = acpi_cpufreq_attr; *attr; attr++)
+ if (*attr == &cpb) {
+ *attr = NULL;
+ break;
+ }
+ }
+#endif
+ acpi_cpufreq_boost_init();
+
+ ret = cpufreq_register_driver(&acpi_cpufreq_driver);
+ if (ret) {
+ free_acpi_perf_data();
+ acpi_cpufreq_boost_exit();
+ }
+ return ret;
+}
+
+static void __exit acpi_cpufreq_exit(void)
+{
+ pr_debug("%s\n", __func__);
+
+ acpi_cpufreq_boost_exit();
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+
+ free_acpi_perf_data();
+}
+
+module_param(acpi_pstate_strict, uint, 0644);
+MODULE_PARM_DESC(acpi_pstate_strict,
+ "value 0 or non-zero. non-zero -> strict ACPI checks are "
+ "performed during frequency changes.");
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
+static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/amd-pstate-trace.c b/drivers/cpufreq/amd-pstate-trace.c
new file mode 100644
index 000000000..891b696dc
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "amd-pstate-trace.h"
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
new file mode 100644
index 000000000..35f38ae67
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * amd-pstate-trace.h - AMD Processor P-state Frequency Driver Tracer
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ */
+
+#if !defined(_AMD_PSTATE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMD_PSTATE_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amd_cpu
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE amd-pstate-trace
+
+#define TPS(x) tracepoint_string(x)
+
+TRACE_EVENT(amd_pstate_perf,
+
+ TP_PROTO(unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity,
+ u64 freq,
+ u64 mperf,
+ u64 aperf,
+ u64 tsc,
+ unsigned int cpu_id,
+ bool changed,
+ bool fast_switch
+ ),
+
+ TP_ARGS(min_perf,
+ target_perf,
+ capacity,
+ freq,
+ mperf,
+ aperf,
+ tsc,
+ cpu_id,
+ changed,
+ fast_switch
+ ),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, min_perf)
+ __field(unsigned long, target_perf)
+ __field(unsigned long, capacity)
+ __field(unsigned long long, freq)
+ __field(unsigned long long, mperf)
+ __field(unsigned long long, aperf)
+ __field(unsigned long long, tsc)
+ __field(unsigned int, cpu_id)
+ __field(bool, changed)
+ __field(bool, fast_switch)
+ ),
+
+ TP_fast_assign(
+ __entry->min_perf = min_perf;
+ __entry->target_perf = target_perf;
+ __entry->capacity = capacity;
+ __entry->freq = freq;
+ __entry->mperf = mperf;
+ __entry->aperf = aperf;
+ __entry->tsc = tsc;
+ __entry->cpu_id = cpu_id;
+ __entry->changed = changed;
+ __entry->fast_switch = fast_switch;
+ ),
+
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
+ (unsigned long)__entry->min_perf,
+ (unsigned long)__entry->target_perf,
+ (unsigned long)__entry->capacity,
+ (unsigned long long)__entry->freq,
+ (unsigned long long)__entry->mperf,
+ (unsigned long long)__entry->aperf,
+ (unsigned long long)__entry->tsc,
+ (unsigned int)__entry->cpu_id,
+ (__entry->changed) ? "true" : "false",
+ (__entry->fast_switch) ? "true" : "false"
+ )
+);
+
+#endif /* _AMD_PSTATE_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
new file mode 100644
index 000000000..b448c8d6a
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-1.0-or-later
+/*
+ * AMD Processor P-state Frequency Driver Unit Test
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ *
+ * The AMD P-State Unit Test is a test module for the amd-pstate driver.
+ * 1) It helps users verify that their processor (SBIOS/firmware or
+ * hardware) supports the driver. 2) It gives the kernel a basic
+ * functional test to catch regressions during updates. 3) More
+ * functional or performance tests can be added later to compare results,
+ * which will benefit power and performance scaling optimization.
+ *
+ * This driver implements a basic framework, with plans to enhance it
+ * with additional test cases to improve the depth and coverage of the
+ * test.
+ *
+ * See the "Unit Tests for amd-pstate" section of
+ * Documentation/admin-guide/pm/amd-pstate.rst for more detail.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/amd-pstate.h>
+
+#include <acpi/cppc_acpi.h>
+
+/*
+ * Abbreviations:
+ * amd_pstate_ut: used as a shortform for AMD P-State unit test.
+ * It helps to keep variable names smaller, simpler
+ */
+enum amd_pstate_ut_result {
+ AMD_PSTATE_UT_RESULT_PASS,
+ AMD_PSTATE_UT_RESULT_FAIL,
+};
+
+struct amd_pstate_ut_struct {
+ const char *name;
+ void (*func)(u32 index);
+ enum amd_pstate_ut_result result;
+};
+
+/*
+ * Prototypes of the AMD P-State unit test cases
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index);
+static void amd_pstate_ut_check_enabled(u32 index);
+static void amd_pstate_ut_check_perf(u32 index);
+static void amd_pstate_ut_check_freq(u32 index);
+
+static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
+ {"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
+ {"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
+ {"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+};
+
+static bool get_shared_mem(void)
+{
+ bool result = false;
+
+ if (!boot_cpu_has(X86_FEATURE_CPPC))
+ result = true;
+
+ return result;
+}
+
+/*
+ * Check that the _CPC object is present in the SBIOS.
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+{
+ if (acpi_cpc_valid())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ }
+}
+
+static void amd_pstate_ut_pstate_enable(u32 index)
+{
+ int ret = 0;
+ u64 cppc_enable = 0;
+
+ ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ return;
+ }
+ if (cppc_enable)
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s amd pstate must be enabled!\n", __func__);
+ }
+}
+
+/*
+ * check if amd pstate is enabled
+ */
+static void amd_pstate_ut_check_enabled(u32 index)
+{
+ if (get_shared_mem())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else
+ amd_pstate_ut_pstate_enable(index);
+}
+
+/*
+ * check if performance values are reasonable.
+ * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
+ */
+static void amd_pstate_ut_check_perf(u32 index)
+{
+ int cpu = 0, ret = 0;
+ u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
+ u64 cap1 = 0;
+ struct cppc_perf_caps cppc_perf;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ highest_perf = amd_get_highest_perf();
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (get_shared_mem()) {
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
+ goto skip_test;
+ }
+
+ nominal_perf = cppc_perf.nominal_perf;
+ lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ lowest_perf = cppc_perf.lowest_perf;
+ } else {
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
+ goto skip_test;
+ }
+
+ nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
+ lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
+ lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ }
+
+ if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
+ (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
+ (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
+ (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, highest_perf, cpudata->highest_perf,
+ nominal_perf, cpudata->nominal_perf,
+ lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
+ lowest_perf, cpudata->lowest_perf);
+ goto skip_test;
+ }
+
+ if (!((highest_perf >= nominal_perf) &&
+ (nominal_perf > lowest_nonlinear_perf) &&
+ (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_perf > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, highest_perf, nominal_perf,
+ lowest_nonlinear_perf, lowest_perf);
+ goto skip_test;
+ }
+ cpufreq_cpu_put(policy);
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ return;
+skip_test:
+ cpufreq_cpu_put(policy);
+}
+
+/*
+ * Check if frequency values are reasonable.
+ * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
+ * Also check max_freq when boost mode is supported.
+ */
+static void amd_pstate_ut_check_freq(u32 index)
+{
+ int cpu = 0;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
+ (cpudata->min_freq > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, cpudata->min_freq);
+ goto skip_test;
+ }
+
+ if (cpudata->min_freq != policy->min) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->min_freq, policy->min);
+ goto skip_test;
+ }
+
+ if (cpudata->boost_supported) {
+ if ((policy->max == cpudata->max_freq) ||
+ (policy->max == cpudata->nominal_freq))
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
+ __func__, cpu, policy->max, cpudata->max_freq,
+ cpudata->nominal_freq);
+ goto skip_test;
+ }
+ } else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d must support boost!\n", __func__, cpu);
+ goto skip_test;
+ }
+ cpufreq_cpu_put(policy);
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ return;
+skip_test:
+ cpufreq_cpu_put(policy);
+}
+
+static int __init amd_pstate_ut_init(void)
+{
+ u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
+
+ for (i = 0; i < arr_size; i++) {
+ amd_pstate_ut_cases[i].func(i);
+ switch (amd_pstate_ut_cases[i].result) {
+ case AMD_PSTATE_UT_RESULT_PASS:
+ pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ case AMD_PSTATE_UT_RESULT_FAIL:
+ default:
+ pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void __exit amd_pstate_ut_exit(void)
+{
+}
+
+module_init(amd_pstate_ut_init);
+module_exit(amd_pstate_ut_exit);
+
+MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
+MODULE_DESCRIPTION("AMD P-state driver Test module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
new file mode 100644
index 000000000..edc294ee5
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * amd-pstate.c - AMD Processor P-state Frequency Driver
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ * AMD P-State introduces a new CPU performance scaling design for AMD
+ * processors using the ACPI Collaborative Performance and Power Control (CPPC)
+ * feature, which works with the AMD SMU firmware to provide a finer-grained
+ * frequency control range. It replaces the legacy ACPI P-States control and
+ * offers a flexible, low-latency interface for the Linux kernel to
+ * communicate performance hints directly to the hardware.
+ *
+ * AMD P-State is supported on recent AMD Zen-based CPU series, including some
+ * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
+ * AMD P-State supported system. There are two types of hardware
+ * implementations for AMD P-State: 1) Full MSR Solution and 2) Shared Memory
+ * Solution. The X86_FEATURE_CPPC CPU feature flag is used to distinguish
+ * between the two types.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/static_call.h>
+#include <linux/amd-pstate.h>
+
+#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+#include "amd-pstate-trace.h"
+
+#define AMD_PSTATE_TRANSITION_LATENCY 20000
+#define AMD_PSTATE_TRANSITION_DELAY 1000
+
+/*
+ * TODO: We need more time to fine-tune processors using the shared memory
+ * solution together with the community.
+ *
+ * Some performance drops on CPU benchmarks have been reported by SUSE, and we
+ * are working with them to fine-tune the shared memory solution. The driver is
+ * therefore disabled by default, so acpi-cpufreq is used on these processors,
+ * and a module parameter is provided to enable it manually for debugging.
+ */
+static struct cpufreq_driver amd_pstate_driver;
+static int cppc_load __initdata;
+
+static inline int pstate_enable(bool enable)
+{
+ return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
+}
+
+static int cppc_enable(bool enable)
+{
+ int cpu, ret = 0;
+
+ for_each_present_cpu(cpu) {
+ ret = cppc_set_enable(cpu, enable);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+
+static inline int amd_pstate_enable(bool enable)
+{
+ return static_call(amd_pstate_enable)(enable);
+}
+
+static int pstate_init_perf(struct amd_cpudata *cpudata)
+{
+ u64 cap1;
+ u32 highest_perf;
+
+ int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ &cap1);
+ if (ret)
+ return ret;
+
+ /*
+ * TODO: Introduce AMD specific power feature.
+ *
+ * CPPC entry doesn't indicate the highest performance in some ASICs.
+ */
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+
+ WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
+ WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+
+ return 0;
+}
+
+static int cppc_init_perf(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+ u32 highest_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > cppc_perf.highest_perf)
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
+
+ WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_perf,
+ cppc_perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+
+ return 0;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+
+static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
+{
+ return static_call(amd_pstate_init_perf)(cpudata);
+}
+
+static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ else
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+}
+
+static void cppc_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.min_perf = min_perf;
+ perf_ctrls.desired_perf = des_perf;
+
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+}
+
+DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+
+static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+ u32 min_perf, u32 des_perf,
+ u32 max_perf, bool fast_switch)
+{
+ static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
+static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
+{
+ u64 aperf, mperf, tsc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = rdtsc();
+
+ if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
+ local_irq_restore(flags);
+ return false;
+ }
+
+ local_irq_restore(flags);
+
+ cpudata->cur.aperf = aperf;
+ cpudata->cur.mperf = mperf;
+ cpudata->cur.tsc = tsc;
+ cpudata->cur.aperf -= cpudata->prev.aperf;
+ cpudata->cur.mperf -= cpudata->prev.mperf;
+ cpudata->cur.tsc -= cpudata->prev.tsc;
+
+ cpudata->prev.aperf = aperf;
+ cpudata->prev.mperf = mperf;
+ cpudata->prev.tsc = tsc;
+
+ cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
+
+ return true;
+}
+
+static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, bool fast_switch)
+{
+ u64 prev = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value = prev;
+
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(des_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
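+	/*
+	 * The cached MSR_AMD_CPPC_REQ value packs min, desired and max
+	 * performance into separate fields; the register is only rewritten
+	 * below when the packed value actually changes.
+	 */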
+
+ if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
+ trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
+ cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
+ cpudata->cpu, (value != prev), fast_switch);
+ }
+
+ if (value == prev)
+ return;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ amd_pstate_update_perf(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
+}
+
+static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+
+ return 0;
+}
+
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, bool fast_switch)
+{
+ struct cpufreq_freqs freqs;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned long max_perf, min_perf, des_perf, cap_perf;
+
+ if (!cpudata->max_freq)
+ return -ENODEV;
+
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ max_perf = cap_perf;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
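+	/* Map target_freq linearly onto the 0..cap_perf scale (max_freq corresponds to cap_perf). */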
+ des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
+ cpudata->max_freq);
+
+ WARN_ON(fast_switch && !policy->fast_switch_enabled);
+ /*
+ * If fast_switch is desired, then there aren't any registered
+ * transition notifiers. See comment for
+ * cpufreq_enable_fast_switch().
+ */
+ if (!fast_switch)
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, fast_switch);
+
+ if (!fast_switch)
+ cpufreq_freq_transition_end(policy, &freqs, false);
+
+ return 0;
+}
+
+static int amd_pstate_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ if (!amd_pstate_update_freq(policy, target_freq, true))
+ return target_freq;
+ return policy->cur;
+}
+
+static void amd_pstate_adjust_perf(unsigned int cpu,
+ unsigned long _min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ unsigned long max_perf, min_perf, des_perf,
+ cap_perf, lowest_nonlinear_perf, max_freq;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ unsigned int target_freq;
+
+ cap_perf = READ_ONCE(cpudata->highest_perf);
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+
+ des_perf = cap_perf;
+ if (target_perf < capacity)
+ des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
+
+ min_perf = READ_ONCE(cpudata->highest_perf);
+ if (_min_perf < capacity)
+ min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+
+ if (min_perf < lowest_nonlinear_perf)
+ min_perf = lowest_nonlinear_perf;
+
+ max_perf = cap_perf;
+ if (max_perf < min_perf)
+ max_perf = min_perf;
+
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
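+	/* Convert the chosen perf level back to a frequency so policy->cur stays in sync. */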
+ target_freq = div_u64(des_perf * max_freq, max_perf);
+ policy->cur = target_freq;
+
+ amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+ cpufreq_cpu_put(policy);
+}
+
+static int amd_get_min_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ /* Switch to khz */
+ return cppc_perf.lowest_freq * 1000;
+}
+
+static int amd_get_max_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+ u32 max_perf, max_freq, nominal_freq, nominal_perf;
+ u64 boost_ratio;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ nominal_freq = cppc_perf.nominal_freq;
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+
+ max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
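+	/*
+	 * Illustrative numbers only: nominal_freq = 2800 MHz, max_perf = 350
+	 * and nominal_perf = 280 give boost_ratio = 350 * 1024 / 280 = 1280,
+	 * so max_freq = 2800 * 1280 >> 10 = 3500 MHz.
+	 */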
+
+ /* Switch to khz */
+ return max_freq * 1000;
+}
+
+static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ /* Switch to khz */
+ return cppc_perf.nominal_freq * 1000;
+}
+
+static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_caps cppc_perf;
+ u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
+ nominal_freq, nominal_perf;
+ u64 lowest_nonlinear_ratio;
+
+ int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ nominal_freq = cppc_perf.nominal_freq;
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+
+ lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
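+	/* Same scaled-ratio math as in amd_get_max_freq(), scaling nominal_freq down instead. */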
+
+ /* Switch to khz */
+ return lowest_nonlinear_freq * 1000;
+}
+
+static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ if (!cpudata->boost_supported) {
+ pr_err("Boost mode is not supported by this processor or SBIOS\n");
+ return -EINVAL;
+ }
+
+ if (state)
+ policy->cpuinfo.max_freq = cpudata->max_freq;
+ else
+ policy->cpuinfo.max_freq = cpudata->nominal_freq;
+
+ policy->max = policy->cpuinfo.max_freq;
+
+ ret = freq_qos_update_request(&cpudata->req[1],
+ policy->cpuinfo.max_freq);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
+{
+ u32 highest_perf, nominal_perf;
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ if (highest_perf <= nominal_perf)
+ return;
+
+ cpudata->boost_supported = true;
+ amd_pstate_driver.boost_enabled = true;
+}
+
+static void amd_perf_ctl_reset(unsigned int cpu)
+{
+ wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
+
+static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct device *dev;
+ struct amd_cpudata *cpudata;
+
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+	 * which is ideal for the initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
+ policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;
+
+ policy->min = min_freq;
+ policy->max = max_freq;
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC))
+ policy->fast_switch_possible = true;
+
+ ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
+ FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
+ goto free_cpudata1;
+ }
+
+ ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
+ FREQ_QOS_MAX, policy->cpuinfo.max_freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
+ goto free_cpudata2;
+ }
+
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata2:
+ freq_qos_remove_request(&cpudata->req[0]);
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ freq_qos_remove_request(&cpudata->req[1]);
+ freq_qos_remove_request(&cpudata->req[0]);
+ policy->fast_switch_possible = false;
+ kfree(cpudata);
+
+ return 0;
+}
+
+static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
+
+ return ret;
+}
+
+static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
+
+ return ret;
+}
+
+/* Sysfs attributes */
+
+/*
+ * This attribute reports the maximum hardware frequency.
+ * If boost is supported but not active, this frequency will be larger than
+ * the one in cpuinfo.
+ */
+static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int max_freq;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ max_freq = amd_get_max_freq(cpudata);
+ if (max_freq < 0)
+ return max_freq;
+
+ return sprintf(&buf[0], "%u\n", max_freq);
+}
+
+static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int freq;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (freq < 0)
+ return freq;
+
+ return sprintf(&buf[0], "%u\n", freq);
+}
+
+/*
+ * In some ASICs, the highest_perf is not the one in the _CPC table, so we
+ * need to expose it to sysfs.
+ */
+static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
+ char *buf)
+{
+ u32 perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ perf = READ_ONCE(cpudata->highest_perf);
+
+ return sprintf(&buf[0], "%u\n", perf);
+}
+
+cpufreq_freq_attr_ro(amd_pstate_max_freq);
+cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
+
+cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+
+static struct freq_attr *amd_pstate_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ NULL,
+};
+
+static struct cpufreq_driver amd_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
+ .verify = amd_pstate_verify,
+ .target = amd_pstate_target,
+ .fast_switch = amd_pstate_fast_switch,
+ .init = amd_pstate_cpu_init,
+ .exit = amd_pstate_cpu_exit,
+ .suspend = amd_pstate_cpu_suspend,
+ .resume = amd_pstate_cpu_resume,
+ .set_boost = amd_pstate_set_boost,
+ .name = "amd-pstate",
+ .attr = amd_pstate_attr,
+};
+
+static int __init amd_pstate_init(void)
+{
+ int ret;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+ /*
+	 * By default the driver is not loaded; the amd-pstate passive mode
+	 * driver must be enabled explicitly with amd_pstate=passive on the
+	 * kernel command line.
+ */
+ if (!cppc_load) {
+ pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ return -ENODEV;
+ }
+
+ if (!acpi_cpc_valid()) {
+ pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
+ return -ENODEV;
+ }
+
+ /* don't keep reloading if cpufreq_driver exists */
+ if (cpufreq_get_current_driver())
+ return -EEXIST;
+
+ /* capability check */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ pr_debug("AMD CPPC MSR based functionality is supported\n");
+ amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ } else {
+ pr_debug("AMD CPPC shared memory based functionality is supported\n");
+ static_call_update(amd_pstate_enable, cppc_enable);
+ static_call_update(amd_pstate_init_perf, cppc_init_perf);
+ static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ }
+
+ /* enable amd pstate feature */
+ ret = amd_pstate_enable(true);
+ if (ret) {
+ pr_err("failed to enable amd-pstate with return %d\n", ret);
+ return ret;
+ }
+
+ ret = cpufreq_register_driver(&amd_pstate_driver);
+ if (ret)
+ pr_err("failed to register amd_pstate_driver with return %d\n",
+ ret);
+
+ return ret;
+}
+device_initcall(amd_pstate_init);
+
+static int __init amd_pstate_param(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable")) {
+ cppc_load = 0;
+ pr_info("driver is explicitly disabled\n");
+ } else if (!strcmp(str, "passive"))
+ cppc_load = 1;
+
+ return 0;
+}
+early_param("amd_pstate", amd_pstate_param);
+
+MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
+MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 000000000..59b19b997
--- /dev/null
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
+ * for the ondemand governor.
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/percpu-defs.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+#include "cpufreq_ondemand.h"
+
+#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
+#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
+#define CLASS_CODE_SHIFT 56
+#define POWERSAVE_BIAS_MAX 1000
+#define POWERSAVE_BIAS_DEF 400
+
+struct cpu_data_t {
+ u64 actual;
+ u64 reference;
+ unsigned int freq_prev;
+};
+
+static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
+
+static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next,
+ unsigned int relation)
+{
+ int sensitivity;
+ long d_actual, d_reference;
+ struct msr actual, reference;
+ struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *od_data = policy_dbs->dbs_data;
+ struct od_dbs_tuners *od_tuners = od_data->tuners;
+
+ if (!policy->freq_table)
+ return freq_next;
+
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
+ &actual.l, &actual.h);
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
+ &reference.l, &reference.h);
+ actual.h &= 0x00ffffff;
+ reference.h &= 0x00ffffff;
+
+ /* counter wrapped around, so stay on current frequency */
+ if (actual.q < data->actual || reference.q < data->reference) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
+ d_actual = actual.q - data->actual;
+ d_reference = reference.q - data->reference;
+
+ /* divide by 0, so stay on current frequency as well */
+ if (d_reference == 0) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
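+	/*
+	 * Algebraically this is POWERSAVE_BIAS_MAX * d_actual / d_reference,
+	 * i.e. the workload's measured frequency sensitivity on a 0..1000 scale.
+	 */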
+ sensitivity = POWERSAVE_BIAS_MAX -
+ (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
+
+	sensitivity = clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
+
+ /* this workload is not CPU bound, so choose a lower freq */
+ if (sensitivity < od_tuners->powersave_bias) {
+ if (data->freq_prev == policy->cur)
+ freq_next = policy->cur;
+
+ if (freq_next > policy->cur)
+ freq_next = policy->cur;
+ else if (freq_next < policy->cur)
+ freq_next = policy->min;
+ else {
+ unsigned int index;
+
+ index = cpufreq_table_find_index_h(policy,
+ policy->cur - 1,
+ relation & CPUFREQ_RELATION_E);
+ freq_next = policy->freq_table[index].frequency;
+ }
+
+ data->freq_prev = freq_next;
+ } else
+ data->freq_prev = 0;
+
+out:
+ data->actual = actual.q;
+ data->reference = reference.q;
+ return freq_next;
+}
+
+static int __init amd_freq_sensitivity_init(void)
+{
+ u64 val;
+ struct pci_dev *pcidev;
+ unsigned int pci_vendor;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ pci_vendor = PCI_VENDOR_ID_AMD;
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ pci_vendor = PCI_VENDOR_ID_HYGON;
+ else
+ return -ENODEV;
+
+ pcidev = pci_get_device(pci_vendor,
+ PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
+
+ if (!pcidev) {
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
+ } else {
+ pci_dev_put(pcidev);
+ }
+
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ return -ENODEV;
+
+ if (!(val >> CLASS_CODE_SHIFT))
+ return -ENODEV;
+
+ od_register_powersave_bias_handler(amd_powersave_bias_target,
+ POWERSAVE_BIAS_DEF);
+ return 0;
+}
+late_initcall(amd_freq_sensitivity_init);
+
+static void __exit amd_freq_sensitivity_exit(void)
+{
+ od_unregister_powersave_bias_handler();
+}
+module_exit(amd_freq_sensitivity_exit);
+
+static const struct x86_cpu_id __maybe_unused amd_freq_sensitivity_ids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
+
+MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
+MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
+ "the ondemand governor.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 000000000..b74289a95
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+/* Clk register set */
+#define ARMADA_37XX_CLK_TBG_SEL 0
+#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/* AVS register set */
+#define ARMADA_37XX_AVS_CTL0 0x0
+#define ARMADA_37XX_AVS_ENABLE BIT(30)
+#define ARMADA_37XX_AVS_HIGH_VDD_LIMIT 16
+#define ARMADA_37XX_AVS_LOW_VDD_LIMIT 22
+#define ARMADA_37XX_AVS_VDD_MASK 0x3F
+#define ARMADA_37XX_AVS_CTL2 0x8
+#define ARMADA_37XX_AVS_LOW_VDD_EN BIT(6)
+#define ARMADA_37XX_AVS_VSET(x) (0x1C + 4 * (x))
+
+/*
+ * On Armada 37xx the power management hardware manages four levels of CPU
+ * load; each level can be associated with a CPU clock source, a CPU
+ * divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR 4
+
+#define MIN_VOLT_MV 1000
+#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
+#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
+
+/* AVS value for the corresponding voltage (in mV) */
+static int avs_map[] = {
+ 747, 758, 770, 782, 793, 805, 817, 828, 840, 852, 863, 875, 887, 898,
+ 910, 922, 933, 945, 957, 968, 980, 992, 1003, 1015, 1027, 1038, 1050,
+ 1062, 1073, 1085, 1097, 1108, 1120, 1132, 1143, 1155, 1167, 1178, 1190,
+ 1202, 1213, 1225, 1237, 1248, 1260, 1272, 1283, 1295, 1307, 1318, 1330,
+ 1342
+};
+
+struct armada37xx_cpufreq_state {
+ struct platform_device *pdev;
+ struct device *cpu_dev;
+ struct regmap *regmap;
+ u32 nb_l0l1;
+ u32 nb_l2l3;
+ u32 nb_dyn_mod;
+ u32 nb_cpu_load;
+};
+
+static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+ u32 avs[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ /*
+ * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+ * unstable because we do not know how to configure it properly.
+ */
+ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four levels managed by the hardware. Once the four levels
+ * are configured, DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct regmap *clk_base, u8 *divider)
+{
+ u32 cpu_tbg_sel;
+ int load_lvl;
+
+	/* Determine which TBG clock the CPU is connected to */
+ regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
+ cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
+ cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+ /* Set cpu clock source, for all the level we use TBG */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /* Set TBG index, for all levels we use the same TBG */
+ val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
+ mask = (ARMADA_37XX_NB_TBG_SEL_MASK
+ << ARMADA_37XX_NB_TBG_SEL_OFF);
+
+ /*
+ * Set cpu divider based on the pre-computed array in
+ * order to have balanced step.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+}
+
+/*
+ * Find the Armada 37xx supported AVS value whose voltage is the closest
+ * one at or above the target voltage.
+ */
+static u32 armada_37xx_avs_val_match(int target_vm)
+{
+ u32 avs;
+
+ /* Find out the round-up closest supported voltage value */
+ for (avs = 0; avs < ARRAY_SIZE(avs_map); avs++)
+ if (avs_map[avs] >= target_vm)
+ break;
+
+ /*
+ * If all supported voltages are smaller than target one,
+ * choose the largest supported voltage
+ */
+ if (avs == ARRAY_SIZE(avs_map))
+ avs = ARRAY_SIZE(avs_map) - 1;
+
+ return avs;
+}
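+
+/*
+ * For illustration: armada_37xx_avs_val_match(900) returns 14, since
+ * avs_map[14] == 910 mV is the first supported voltage >= 900 mV.
+ */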
+
+/*
+ * For the Armada 37xx SoC, the L0 (VSET0) VDD AVS value is set to the SVC
+ * revision value, or to a default value when SVC is not supported.
+ * - L0 can be read from the AVS_CTL0 register and the L0 voltage can be
+ *   obtained from the avs_map mapping table.
+ * - The L1 voltage should be about 100 mV lower than the L0 voltage.
+ * - The L2 & L3 voltages should be about 150 mV lower than the L0 voltage.
+ * This function calculates the L1, L2 and L3 AVS values dynamically based
+ * on the L0 voltage and fills all AVS values into the AVS value table.
+ * When the base CPU frequency is 1000 or 1200 MHz there is an additional
+ * minimal AVS value for load L1.
+ */
+static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int target_vm;
+ int load_level = 0;
+ u32 l0_vdd_min;
+
+ if (base == NULL)
+ return;
+
+ /* Get L0 VDD min value */
+ regmap_read(base, ARMADA_37XX_AVS_CTL0, &l0_vdd_min);
+ l0_vdd_min = (l0_vdd_min >> ARMADA_37XX_AVS_LOW_VDD_LIMIT) &
+ ARMADA_37XX_AVS_VDD_MASK;
+ if (l0_vdd_min >= ARRAY_SIZE(avs_map)) {
+ pr_err("L0 VDD MIN %d is not correct.\n", l0_vdd_min);
+ return;
+ }
+ dvfs->avs[0] = l0_vdd_min;
+
+ if (avs_map[l0_vdd_min] <= MIN_VOLT_MV) {
+ /*
+		 * If the L0 voltage is not above 1000 mV, then all other
+		 * VDD sets use the minimum supported voltage (MIN_VOLT_MV).
+ */
+ u32 avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ dvfs->avs[load_level] = avs_min;
+
+ /*
+		 * When the base CPU frequency is 1000/1200 MHz, set the AVS
+		 * values for loads L0 and L1 to their typical initial values
+		 * according to the Armada 3700 Hardware Specifications.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+ dvfs->avs[0] = dvfs->avs[1] = avs_min;
+ }
+
+ return;
+ }
+
+ /*
+	 * The L1 voltage is the L0 voltage minus 100 mV and must not be
+	 * lower than 1000 mV.
+ */
+
+ target_vm = avs_map[l0_vdd_min] - 100;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+	 * The L2 & L3 voltages are the L0 voltage minus 150 mV and must
+	 * not be lower than 1000 mV.
+ */
+ target_vm = avs_map[l0_vdd_min] - 150;
+ target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
+
+ /*
+ * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
+ * otherwise the CPU gets stuck when switching from load L1 to load L0.
+ * Also ensure that avs value for load L1 is not higher than for L0.
+ */
+ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
+ u32 avs_min_l1;
+
+ if (dvfs->cpu_freq_max >= 1200*1000*1000)
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
+ else
+ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
+
+ if (avs_min_l1 > dvfs->avs[0])
+ avs_min_l1 = dvfs->avs[0];
+
+ if (dvfs->avs[1] < avs_min_l1)
+ dvfs->avs[1] = avs_min_l1;
+ }
+}
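+
+/*
+ * For illustration, on a variant below 1 GHz (so the extra L1 minimum does
+ * not apply): if the L0 AVS index read from AVS_CTL0 corresponds to 1202 mV,
+ * the L1 target of 1102 mV is matched (rounded up) to the 1108 mV entry of
+ * avs_map, and the L2/L3 target of 1052 mV to the 1062 mV entry.
+ */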
+
+static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+ struct armada_37xx_dvfs *dvfs)
+{
+ unsigned int avs_val = 0;
+ int load_level = 0;
+
+ if (base == NULL)
+ return;
+
+ /* Disable AVS before the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE, 0);
+
+ /* Enable low voltage mode */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL2,
+ ARMADA_37XX_AVS_LOW_VDD_EN,
+ ARMADA_37XX_AVS_LOW_VDD_EN);
+
+ for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++) {
+ avs_val = dvfs->avs[load_level];
+ regmap_update_bits(base, ARMADA_37XX_AVS_VSET(load_level-1),
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ ARMADA_37XX_AVS_VDD_MASK << ARMADA_37XX_AVS_LOW_VDD_LIMIT,
+ avs_val << ARMADA_37XX_AVS_HIGH_VDD_LIMIT |
+ avs_val << ARMADA_37XX_AVS_LOW_VDD_LIMIT);
+ }
+
+ /* Enable AVS after the configuration */
+ regmap_update_bits(base, ARMADA_37XX_AVS_CTL0,
+ ARMADA_37XX_AVS_ENABLE,
+ ARMADA_37XX_AVS_ENABLE);
+}
+
+static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int armada37xx_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
+
+ regmap_read(state->regmap, ARMADA_37XX_NB_L0L1, &state->nb_l0l1);
+ regmap_read(state->regmap, ARMADA_37XX_NB_L2L3, &state->nb_l2l3);
+ regmap_read(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
+ &state->nb_cpu_load);
+ regmap_read(state->regmap, ARMADA_37XX_NB_DYN_MOD, &state->nb_dyn_mod);
+
+ return 0;
+}
+
+static int armada37xx_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
+
+ /* Ensure DVFS is disabled otherwise the following registers are RO */
+ armada37xx_cpufreq_disable_dvfs(state->regmap);
+
+ regmap_write(state->regmap, ARMADA_37XX_NB_L0L1, state->nb_l0l1);
+ regmap_write(state->regmap, ARMADA_37XX_NB_L2L3, state->nb_l2l3);
+ regmap_write(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
+ state->nb_cpu_load);
+
+ /*
+	 * The NB_DYN_MOD register is the one that actually re-enables DVFS
+	 * if it was enabled before the suspend operation. This must be done
+	 * last, otherwise the other registers are not writable.
+ */
+ regmap_write(state->regmap, ARMADA_37XX_NB_DYN_MOD, state->nb_dyn_mod);
+
+ return 0;
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct cpufreq_dt_platform_data pdata;
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned long freq;
+ unsigned int base_frequency;
+ struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk, *parent;
+
+ nb_clk_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
+ if (IS_ERR(nb_clk_base))
+ return -ENODEV;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+ avs_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-avs");
+
+	/* If AVS is not present, don't use it but still try to set up DVFS */
+ if (IS_ERR(avs_base)) {
+ pr_info("Syscon failed for Adapting Voltage Scaling: skip it\n");
+ avs_base = NULL;
+ }
+	/* Before doing any DVFS configuration, disable it first */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+ * On CPU 0 register the operating points supported (which are
+ * the nominal CPU frequency and full integer divisions of
+ * it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ dev_err(cpu_dev, "Cannot get CPU\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ parent = clk_get_parent(clk);
+ if (IS_ERR(parent)) {
+ dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+ clk_put(clk);
+ return PTR_ERR(parent);
+ }
+
+ /* Get parent CPU frequency */
+ base_frequency = clk_get_rate(parent);
+
+ if (!base_frequency) {
+ dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+ clk_put(clk);
+ return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
+ if (!dvfs) {
+ clk_put(clk);
+ return -EINVAL;
+ }
+
+ armada37xx_cpufreq_state = kmalloc(sizeof(*armada37xx_cpufreq_state),
+ GFP_KERNEL);
+ if (!armada37xx_cpufreq_state) {
+ clk_put(clk);
+ return -ENOMEM;
+ }
+
+ armada37xx_cpufreq_state->regmap = nb_pm_base;
+
+ armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
+ clk_put(clk);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
+ freq = base_frequency / dvfs->divider[load_lvl];
+ ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
+ if (ret)
+ goto remove_opp;
+	}
+
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ memset(&pdata, 0, sizeof(pdata));
+ pdata.suspend = armada37xx_cpufreq_suspend;
+ pdata.resume = armada37xx_cpufreq_resume;
+
+ pdev = platform_device_register_data(NULL, "cpufreq-dt", -1, &pdata,
+ sizeof(pdata));
+ ret = PTR_ERR_OR_ZERO(pdev);
+ if (ret)
+ goto disable_dvfs;
+
+ armada37xx_cpufreq_state->cpu_dev = cpu_dev;
+ armada37xx_cpufreq_state->pdev = pdev;
+ platform_set_drvdata(pdev, dvfs);
+ return 0;
+
+disable_dvfs:
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+remove_opp:
+	/* Clean up the already-added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = base_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+
+ kfree(armada37xx_cpufreq_state);
+
+ return ret;
+}
+/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+static void __exit armada37xx_cpufreq_driver_exit(void)
+{
+ struct platform_device *pdev = armada37xx_cpufreq_state->pdev;
+ struct armada_37xx_dvfs *dvfs = platform_get_drvdata(pdev);
+ unsigned long freq;
+ int load_lvl;
+
+ platform_device_unregister(pdev);
+
+ armada37xx_cpufreq_disable_dvfs(armada37xx_cpufreq_state->regmap);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ freq = dvfs->cpu_freq_max / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(armada37xx_cpufreq_state->cpu_dev, freq);
+ }
+
+ kfree(armada37xx_cpufreq_state);
+}
+module_exit(armada37xx_cpufreq_driver_exit);
+
+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+ { .compatible = "marvell,armada-3700-nb-pm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
new file mode 100644
index 000000000..b0fc5e84f
--- /dev/null
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPUFreq support for Armada 8K
+ *
+ * Copyright (C) 2018 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ * Gregory Clement <gregory.clement@bootlin.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+/*
+ * Set up the OPP list with the dividers of the max frequency; the actual
+ * frequencies are filled in at runtime.
+ */
+static const int opps_div[] __initconst = {1, 2, 3, 4};
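+
+/*
+ * For illustration: with a (hypothetical) nominal CPU clock of 2 GHz, the
+ * OPPs registered at runtime are 2 GHz, 1 GHz, ~667 MHz and 500 MHz (the
+ * nominal rate divided by 1, 2, 3 and 4).
+ */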
+
+static struct platform_device *armada_8k_pdev;
+
+struct freq_table {
+ struct device *cpu_dev;
+ unsigned int freq[ARRAY_SIZE(opps_div)];
+};
+
+/* If the CPUs share the same clock, then they are in the same cluster. */
+static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
+ struct cpumask *cpumask)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_warn("Failed to get cpu%d device\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ pr_warn("Cannot get clock for CPU %d\n", cpu);
+ } else {
+ if (clk_is_match(clk, cur_clk))
+ cpumask_set_cpu(cpu, cpumask);
+
+ clk_put(clk);
+ }
+ }
+}
+
+static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev,
+ struct freq_table *freq_tables,
+ int opps_index)
+{
+ unsigned int cur_frequency;
+ unsigned int freq;
+ int i, ret;
+
+ /* Get nominal (current) CPU frequency. */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for this CPU\n");
+ return -EINVAL;
+ }
+
+ freq_tables[opps_index].cpu_dev = cpu_dev;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ freq = cur_frequency / opps_div[i];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret)
+ return ret;
+
+ freq_tables[opps_index].freq[i] = freq;
+ }
+
+ return 0;
+}
+
+static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
+{
+ int opps_index, nb_cpus = num_possible_cpus();
+
+	for (opps_index = 0; opps_index < nb_cpus; opps_index++) {
+ int i;
+
+ /* If cpu_dev is NULL then we reached the end of the array */
+ if (!freq_tables[opps_index].cpu_dev)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(opps_div); i++) {
+ /*
+			 * A 0 Hz frequency is not valid; it means the entry
+			 * was never initialized, so there are no more OPPs
+			 * to free.
+ */
+ if (freq_tables[opps_index].freq[i] == 0)
+ break;
+
+ dev_pm_opp_remove(freq_tables[opps_index].cpu_dev,
+ freq_tables[opps_index].freq[i]);
+ }
+ }
+
+ kfree(freq_tables);
+}
+
+static int __init armada_8k_cpufreq_init(void)
+{
+ int ret = 0, opps_index = 0, cpu, nb_cpus;
+ struct freq_table *freq_tables;
+ struct device_node *node;
+ struct cpumask cpus;
+
+ node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock");
+ if (!node || !of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+ of_node_put(node);
+
+ nb_cpus = num_possible_cpus();
+ freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
+ if (!freq_tables)
+ return -ENOMEM;
+ cpumask_copy(&cpus, cpu_possible_mask);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and full integer
+ * divisions of it).
+ */
+ for_each_cpu(cpu, &cpus) {
+ struct cpumask shared_cpus;
+ struct device *cpu_dev;
+ struct clk *clk;
+
+ cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ ret = PTR_ERR(clk);
+ goto remove_opp;
+ }
+
+ ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index);
+ if (ret) {
+ clk_put(clk);
+ goto remove_opp;
+ }
+
+ opps_index++;
+ cpumask_clear(&shared_cpus);
+ armada_8k_get_sharing_cpus(clk, &shared_cpus);
+ dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
+ cpumask_andnot(&cpus, &cpus, &shared_cpus);
+ clk_put(clk);
+ }
+
+ armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ ret = PTR_ERR_OR_ZERO(armada_8k_pdev);
+ if (ret)
+ goto remove_opp;
+
+ platform_set_drvdata(armada_8k_pdev, freq_tables);
+
+ return 0;
+
+remove_opp:
+ armada_8k_cpufreq_free_table(freq_tables);
+ return ret;
+}
+module_init(armada_8k_cpufreq_init);
+
+static void __exit armada_8k_cpufreq_exit(void)
+{
+ struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev);
+
+ platform_device_unregister(armada_8k_pdev);
+ armada_8k_cpufreq_free_table(freq_tables);
+}
+module_exit(armada_8k_cpufreq_exit);
+
+static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
+ { .compatible = "marvell,ap806-cpu-clock" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
+
+MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>");
+MODULE_DESCRIPTION("Armada 8K cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
new file mode 100644
index 000000000..39221a9a1
--- /dev/null
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -0,0 +1,189 @@
+/*
+ * CPU frequency scaling for Broadcom BMIPS SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+/* for mips_hpt_frequency */
+#include <asm/time.h>
+
+#define BMIPS_CPUFREQ_PREFIX "bmips"
+#define BMIPS_CPUFREQ_NAME BMIPS_CPUFREQ_PREFIX "-cpufreq"
+
+#define TRANSITION_LATENCY (25 * 1000) /* 25 us */
+
+#define BMIPS5_CLK_DIV_SET_SHIFT 0x7
+#define BMIPS5_CLK_DIV_SHIFT 0x4
+#define BMIPS5_CLK_DIV_MASK 0xf
+
+enum bmips_type {
+ BMIPS5000,
+ BMIPS5200,
+};
+
+struct cpufreq_compat {
+ const char *compatible;
+ unsigned int bmips_type;
+ unsigned int clk_mult;
+ unsigned int max_freqs;
+};
+
+#define BMIPS(c, t, m, f) { \
+ .compatible = c, \
+ .bmips_type = (t), \
+ .clk_mult = (m), \
+ .max_freqs = (f), \
+}
+
+static struct cpufreq_compat bmips_cpufreq_compat[] = {
+ BMIPS("brcm,bmips5000", BMIPS5000, 8, 4),
+ BMIPS("brcm,bmips5200", BMIPS5200, 8, 4),
+ { }
+};
+
+static struct cpufreq_compat *priv;
+
+static int htp_freq_to_cpu_freq(unsigned int clk_mult)
+{
+ return mips_hpt_frequency * clk_mult / 1000;
+}
+
+static struct cpufreq_frequency_table *
+bmips_cpufreq_get_freq_table(const struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned long cpu_freq;
+ int i;
+
+ cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult);
+
+ table = kmalloc_array(priv->max_freqs + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < priv->max_freqs; i++) {
+ table[i].frequency = cpu_freq / (1 << i);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ return table;
+}
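+
+/*
+ * For illustration: on a (hypothetical) part with mips_hpt_frequency of
+ * 175 MHz and clk_mult of 8, cpu_freq is 1400000 kHz and the table holds
+ * 1400000, 700000, 350000 and 175000 kHz (cpu_freq / 2^i).
+ */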
+
+static unsigned int bmips_cpufreq_get(unsigned int cpu)
+{
+ unsigned int div;
+ uint32_t mode;
+
+ switch (priv->bmips_type) {
+ case BMIPS5200:
+ case BMIPS5000:
+ mode = read_c0_brcm_mode();
+ div = ((mode >> BMIPS5_CLK_DIV_SHIFT) & BMIPS5_CLK_DIV_MASK);
+ break;
+ default:
+ div = 0;
+ }
+
+ return htp_freq_to_cpu_freq(priv->clk_mult) / (1 << div);
+}
+
+static int bmips_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int div = policy->freq_table[index].driver_data;
+
+ switch (priv->bmips_type) {
+ case BMIPS5200:
+ case BMIPS5000:
+ change_c0_brcm_mode(BMIPS5_CLK_DIV_MASK << BMIPS5_CLK_DIV_SHIFT,
+ (1 << BMIPS5_CLK_DIV_SET_SHIFT) |
+ (div << BMIPS5_CLK_DIV_SHIFT));
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+
+ return 0;
+}
+
+static int bmips_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = bmips_cpufreq_get_freq_table(policy);
+ if (IS_ERR(freq_table)) {
+ pr_err("%s: couldn't determine frequency table (%ld).\n",
+ BMIPS_CPUFREQ_NAME, PTR_ERR(freq_table));
+ return PTR_ERR(freq_table);
+ }
+
+ cpufreq_generic_init(policy, freq_table, TRANSITION_LATENCY);
+ pr_info("%s: registered\n", BMIPS_CPUFREQ_NAME);
+
+ return 0;
+}
+
+static struct cpufreq_driver bmips_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bmips_cpufreq_target_index,
+ .get = bmips_cpufreq_get,
+ .init = bmips_cpufreq_init,
+ .exit = bmips_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+ .name = BMIPS_CPUFREQ_PREFIX,
+};
+
+static int __init bmips_cpufreq_driver_init(void)
+{
+ struct cpufreq_compat *cc;
+ struct device_node *np;
+
+ for (cc = bmips_cpufreq_compat; cc->compatible; cc++) {
+ np = of_find_compatible_node(NULL, "cpu", cc->compatible);
+ if (np) {
+ of_node_put(np);
+ priv = cc;
+ break;
+ }
+ }
+
+ /* We hit the guard element of the array. No compatible CPU found. */
+ if (!cc->compatible)
+ return -ENODEV;
+
+ return cpufreq_register_driver(&bmips_cpufreq_driver);
+}
+module_init(bmips_cpufreq_driver_init);
+
+static void __exit bmips_cpufreq_driver_exit(void)
+{
+ cpufreq_unregister_driver(&bmips_cpufreq_driver);
+}
+module_exit(bmips_cpufreq_driver_exit);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
new file mode 100644
index 000000000..f644c5e32
--- /dev/null
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -0,0 +1,786 @@
+/*
+ * CPU frequency scaling for Broadcom SoCs with AVS firmware that
+ * supports DVS or DVFS
+ *
+ * Copyright (c) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * "AVS" is the name of a firmware developed at Broadcom. It derives
+ * its name from the technique called "Adaptive Voltage Scaling".
+ * Adaptive voltage scaling was the original purpose of this firmware.
+ * The AVS firmware still supports "AVS mode", where all it does is
+ * adaptive voltage scaling. However, on some newer Broadcom SoCs, the
+ * AVS Firmware, despite its unchanged name, also supports DFS mode and
+ * DVFS mode.
+ *
+ * In the context of this document and the related driver, "AVS" by
+ * itself always means the Broadcom firmware and never refers to the
+ * technique called "Adaptive Voltage Scaling".
+ *
+ * The Broadcom STB AVS CPUfreq driver provides voltage and frequency
+ * scaling on Broadcom SoCs using AVS firmware with support for DFS and
+ * DVFS. The AVS firmware is running on its own co-processor. The
+ * driver supports both uniprocessor (UP) and symmetric multiprocessor
+ * (SMP) systems which share clock and voltage across all CPUs.
+ *
+ * Actual voltage and frequency scaling is done solely by the AVS
+ * firmware. This driver does not change frequency or voltage itself.
+ * It provides a standard CPUfreq interface to the rest of the kernel
+ * and to userland. It interfaces with the AVS firmware to effect the
+ * requested changes and to report back the current system status in a
+ * way that is expected by existing tools.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+/* Max number of arguments AVS calls take */
+#define AVS_MAX_CMD_ARGS 4
+/*
+ * This macro is used to generate AVS parameter register offsets. For
+ * x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
+ * access outside of the parameter range. (Offset 0 is the first parameter.)
+ */
+#define AVS_PARAM_MULT(x) ((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
+
+/* AVS Mailbox Register offsets */
+#define AVS_MBOX_COMMAND 0x00
+#define AVS_MBOX_STATUS 0x04
+#define AVS_MBOX_VOLTAGE0 0x08
+#define AVS_MBOX_TEMP0 0x0c
+#define AVS_MBOX_PV0 0x10
+#define AVS_MBOX_MV0 0x14
+#define AVS_MBOX_PARAM(x) (0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
+#define AVS_MBOX_REVISION 0x28
+#define AVS_MBOX_PSTATE 0x2c
+#define AVS_MBOX_HEARTBEAT 0x30
+#define AVS_MBOX_MAGIC 0x34
+#define AVS_MBOX_SIGMA_HVT 0x38
+#define AVS_MBOX_SIGMA_SVT 0x3c
+#define AVS_MBOX_VOLTAGE1 0x40
+#define AVS_MBOX_TEMP1 0x44
+#define AVS_MBOX_PV1 0x48
+#define AVS_MBOX_MV1 0x4c
+#define AVS_MBOX_FREQUENCY 0x50
+
+/* AVS Commands */
+#define AVS_CMD_AVAILABLE 0x00
+#define AVS_CMD_DISABLE 0x10
+#define AVS_CMD_ENABLE 0x11
+#define AVS_CMD_S2_ENTER 0x12
+#define AVS_CMD_S2_EXIT 0x13
+#define AVS_CMD_BBM_ENTER 0x14
+#define AVS_CMD_BBM_EXIT 0x15
+#define AVS_CMD_S3_ENTER 0x16
+#define AVS_CMD_S3_EXIT 0x17
+#define AVS_CMD_BALANCE 0x18
+/* PMAP and P-STATE commands */
+#define AVS_CMD_GET_PMAP 0x30
+#define AVS_CMD_SET_PMAP 0x31
+#define AVS_CMD_GET_PSTATE 0x40
+#define AVS_CMD_SET_PSTATE 0x41
+
+/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
+#define AVS_MODE_AVS 0x0
+#define AVS_MODE_DFS 0x1
+#define AVS_MODE_DVS 0x2
+#define AVS_MODE_DVFS 0x3
+
+/*
+ * PMAP parameter p1
+ * unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10 , ndiv_int:9-0
+ */
+#define NDIV_INT_SHIFT 0
+#define NDIV_INT_MASK 0x3ff
+#define PDIV_SHIFT 10
+#define PDIV_MASK 0xf
+#define MDIV_P0_SHIFT 16
+#define MDIV_P0_MASK 0xff
+/*
+ * PMAP parameter p2
+ * mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15:8, mdiv_p1:7:0
+ */
+#define MDIV_P1_SHIFT 0
+#define MDIV_P1_MASK 0xff
+#define MDIV_P2_SHIFT 8
+#define MDIV_P2_MASK 0xff
+#define MDIV_P3_SHIFT 16
+#define MDIV_P3_MASK 0xff
+#define MDIV_P4_SHIFT 24
+#define MDIV_P4_MASK 0xff
+
+/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
+#define AVS_PSTATE_P0 0x0
+#define AVS_PSTATE_P1 0x1
+#define AVS_PSTATE_P2 0x2
+#define AVS_PSTATE_P3 0x3
+#define AVS_PSTATE_P4 0x4
+#define AVS_PSTATE_MAX AVS_PSTATE_P4
+
+/* CPU L2 Interrupt Controller Registers */
+#define AVS_CPU_L2_SET0 0x04
+#define AVS_CPU_L2_INT_MASK BIT(31)
+
+/* AVS Command Status Values */
+#define AVS_STATUS_CLEAR 0x00
+/* Command/notification accepted */
+#define AVS_STATUS_SUCCESS 0xf0
+/* Command/notification rejected */
+#define AVS_STATUS_FAILURE 0xff
+/* Invalid command/notification (unknown) */
+#define AVS_STATUS_INVALID 0xf1
+/* Non-AVS modes are not supported */
+#define AVS_STATUS_NO_SUPP 0xf2
+/* Cannot set P-State until P-Map supplied */
+#define AVS_STATUS_NO_MAP 0xf3
+/* Cannot change P-Map after initial P-Map set */
+#define AVS_STATUS_MAP_SET 0xf4
+/* Max AVS status; higher numbers are used for debugging */
+#define AVS_STATUS_MAX 0xff
+
+/* Other AVS related constants */
+#define AVS_LOOP_LIMIT 10000
+#define AVS_TIMEOUT 300 /* in ms; expected completion is < 10ms */
+#define AVS_FIRMWARE_MAGIC 0xa11600d1
+
+#define BRCM_AVS_CPUFREQ_PREFIX "brcmstb-avs"
+#define BRCM_AVS_CPUFREQ_NAME BRCM_AVS_CPUFREQ_PREFIX "-cpufreq"
+#define BRCM_AVS_CPU_DATA "brcm,avs-cpu-data-mem"
+#define BRCM_AVS_CPU_INTR "brcm,avs-cpu-l2-intr"
+#define BRCM_AVS_HOST_INTR "sw_intr"
+
+struct pmap {
+ unsigned int mode;
+ unsigned int p1;
+ unsigned int p2;
+ unsigned int state;
+};
+
+struct private_data {
+ void __iomem *base;
+ void __iomem *avs_intr_base;
+ struct device *dev;
+ struct completion done;
+ struct semaphore sem;
+ struct pmap pmap;
+ int host_irq;
+};
+
+static void __iomem *__map_region(const char *name)
+{
+ struct device_node *np;
+ void __iomem *ptr;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return NULL;
+
+ ptr = of_iomap(np, 0);
+ of_node_put(np);
+
+ return ptr;
+}
+
+static unsigned long wait_for_avs_command(struct private_data *priv,
+ unsigned long timeout)
+{
+ unsigned long time_left = 0;
+ u32 val;
+
+ /* Event driven, wait for the command interrupt */
+ if (priv->host_irq >= 0)
+ return wait_for_completion_timeout(&priv->done,
+ msecs_to_jiffies(timeout));
+
+ /* Polling for command completion */
+ do {
+ time_left = timeout;
+ val = readl(priv->base + AVS_MBOX_STATUS);
+ if (val)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ return time_left;
+}
+
+static int __issue_avs_command(struct private_data *priv, unsigned int cmd,
+ unsigned int num_in, unsigned int num_out,
+ u32 args[])
+{
+ void __iomem *base = priv->base;
+ unsigned long time_left;
+ unsigned int i;
+ int ret;
+ u32 val;
+
+ ret = down_interruptible(&priv->sem);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure no other command is currently running: cmd is 0 if AVS
+ * co-processor is idle. Due to the guard above, we should almost never
+ * have to wait here.
+ */
+ for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
+ val = readl(base + AVS_MBOX_COMMAND);
+
+ /* Give the caller a chance to retry if AVS is busy. */
+ if (i == AVS_LOOP_LIMIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Clear status before we begin. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* Provide input parameters */
+ for (i = 0; i < num_in; i++)
+ writel(args[i], base + AVS_MBOX_PARAM(i));
+
+ /* Protect from spurious interrupts. */
+ reinit_completion(&priv->done);
+
+ /* Now issue the command & tell firmware to wake up to process it. */
+ writel(cmd, base + AVS_MBOX_COMMAND);
+ writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
+
+ /* Wait for AVS co-processor to finish processing the command. */
+ time_left = wait_for_avs_command(priv, AVS_TIMEOUT);
+
+ /*
+ * If the AVS status is not in the expected range, it means AVS didn't
+ * complete our command in time, and we return an error. Also, if there
+ * is no "time left", we timed out waiting for the interrupt.
+ */
+ val = readl(base + AVS_MBOX_STATUS);
+ if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
+ dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
+ cmd);
+ dev_err(priv->dev, " Time left: %u ms, AVS status: %#x\n",
+ jiffies_to_msecs(time_left), val);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Process returned values */
+ for (i = 0; i < num_out; i++)
+ args[i] = readl(base + AVS_MBOX_PARAM(i));
+
+ /* Clear status to tell AVS co-processor we are done. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* Convert firmware errors to errno's as much as possible. */
+ switch (val) {
+ case AVS_STATUS_INVALID:
+ ret = -EINVAL;
+ break;
+ case AVS_STATUS_NO_SUPP:
+ ret = -ENOTSUPP;
+ break;
+ case AVS_STATUS_NO_MAP:
+ ret = -ENOENT;
+ break;
+ case AVS_STATUS_MAP_SET:
+ ret = -EEXIST;
+ break;
+ case AVS_STATUS_FAILURE:
+ ret = -EIO;
+ break;
+ }
+
+out:
+ up(&priv->sem);
+
+ return ret;
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct private_data *priv = data;
+
+ /* AVS command completed execution. Wake up __issue_avs_command(). */
+ complete(&priv->done);
+
+ return IRQ_HANDLED;
+}
+
+static char *brcm_avs_mode_to_string(unsigned int mode)
+{
+ switch (mode) {
+ case AVS_MODE_AVS:
+ return "AVS";
+ case AVS_MODE_DFS:
+ return "DFS";
+ case AVS_MODE_DVS:
+ return "DVS";
+ case AVS_MODE_DVFS:
+ return "DVFS";
+ }
+ return NULL;
+}
+
+static void brcm_avs_parse_p1(u32 p1, unsigned int *mdiv_p0, unsigned int *pdiv,
+ unsigned int *ndiv)
+{
+ *mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
+ *pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
+ *ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
+}
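+
+/*
+ * For illustration: a (hypothetical) p1 value of 0x00400c64 decodes to
+ * mdiv_p0 = 0x40, pdiv = 3 and ndiv_int = 100 with the masks above.
+ */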
+
+static void brcm_avs_parse_p2(u32 p2, unsigned int *mdiv_p1,
+ unsigned int *mdiv_p2, unsigned int *mdiv_p3,
+ unsigned int *mdiv_p4)
+{
+ *mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
+ *mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
+ *mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
+ *mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
+}
+
+static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, 0, 4, args);
+ if (ret || !pmap)
+ return ret;
+
+ pmap->mode = args[0];
+ pmap->p1 = args[1];
+ pmap->p2 = args[2];
+ pmap->state = args[3];
+
+ return 0;
+}
+
+static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pmap->mode;
+ args[1] = pmap->p1;
+ args[2] = pmap->p2;
+ args[3] = pmap->state;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PMAP, 4, 0, args);
+}
+
+static int brcm_avs_get_pstate(struct private_data *priv, unsigned int *pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, 0, 1, args);
+ if (ret)
+ return ret;
+ *pstate = args[0];
+
+ return 0;
+}
+
+static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pstate;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, 1, 0, args);
+}
+
+static u32 brcm_avs_get_voltage(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_VOLTAGE1);
+}
+
+static u32 brcm_avs_get_frequency(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
+}
+
+/*
+ * We determine which frequencies are supported by cycling through all P-states
+ * and reading back what frequency we are running at for each P-state.
+ */
+static struct cpufreq_frequency_table *
+brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned int pstate;
+ int i, ret;
+
+ /* Remember P-state for later */
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+	 * We allocate space for the 5 different AVS P-states, plus
+	 * extra space for a terminating element.
+ */
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
+ GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
+ ret = brcm_avs_set_pstate(priv, i);
+ if (ret)
+ return ERR_PTR(ret);
+ table[i].frequency = brcm_avs_get_frequency(priv->base);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* Restore P-state */
+ ret = brcm_avs_set_pstate(priv, pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return table;
+}
+
+/*
+ * To ensure the right firmware is running we need to
+ *  - check that the MAGIC matches what we expect
+ *  - check that brcm_avs_get_pmap() doesn't return -ENOTSUPP or -EINVAL
+ * We need to set up our interrupt handling before calling brcm_avs_get_pmap()!
+ */
+static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+{
+ u32 magic;
+ int rc;
+
+ rc = brcm_avs_get_pmap(priv, NULL);
+ magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+	return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+		(rc != -EINVAL);
+}
+
+static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct private_data *priv = policy->driver_data;
+
+ cpufreq_cpu_put(policy);
+
+ return brcm_avs_get_frequency(priv->base);
+}
+
+static int brcm_avs_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return brcm_avs_set_pstate(policy->driver_data,
+ policy->freq_table[index].driver_data);
+}
+
+static int brcm_avs_suspend(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ int ret;
+
+ ret = brcm_avs_get_pmap(priv, &priv->pmap);
+ if (ret)
+ return ret;
+
+ /*
+ * We can't use the P-state returned by brcm_avs_get_pmap(), since
+ * that's the initial P-state from when the P-map was downloaded to the
+ * AVS co-processor, not necessarily the P-state we are running at now.
+ * So, we get the current P-state explicitly.
+ */
+ ret = brcm_avs_get_pstate(priv, &priv->pmap.state);
+ if (ret)
+ return ret;
+
+ /* This is best effort. Nothing to do if it fails. */
+ (void)__issue_avs_command(priv, AVS_CMD_S2_ENTER, 0, 0, NULL);
+
+ return 0;
+}
+
+static int brcm_avs_resume(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ int ret;
+
+ /* This is best effort. Nothing to do if it fails. */
+ (void)__issue_avs_command(priv, AVS_CMD_S2_EXIT, 0, 0, NULL);
+
+ ret = brcm_avs_set_pmap(priv, &priv->pmap);
+ if (ret == -EEXIST) {
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
+
+ dev_warn(dev, "PMAP was already set\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * All initialization code that we only want to execute once goes here. Setup
+ * code that can be re-tried on every core (if it failed before) can go into
+ * brcm_avs_cpufreq_init().
+ */
+static int brcm_avs_prepare_init(struct platform_device *pdev)
+{
+ struct private_data *priv;
+ struct device *dev;
+ int ret;
+
+ dev = &pdev->dev;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ sema_init(&priv->sem, 1);
+ init_completion(&priv->done);
+ platform_set_drvdata(pdev, priv);
+
+ priv->base = __map_region(BRCM_AVS_CPU_DATA);
+ if (!priv->base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_DATA);
+ return -ENOENT;
+ }
+
+ priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
+ if (!priv->avs_intr_base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_INTR);
+ ret = -ENOENT;
+ goto unmap_base;
+ }
+
+ priv->host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
+
+ ret = devm_request_irq(dev, priv->host_irq, irq_handler,
+ IRQF_TRIGGER_RISING,
+ BRCM_AVS_HOST_INTR, priv);
+ if (ret && priv->host_irq >= 0) {
+ dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
+ BRCM_AVS_HOST_INTR, priv->host_irq, ret);
+ goto unmap_intr_base;
+ }
+
+ if (brcm_avs_is_firmware_loaded(priv))
+ return 0;
+
+ dev_err(dev, "AVS firmware is not loaded or doesn't support DVFS\n");
+ ret = -ENODEV;
+
+unmap_intr_base:
+ iounmap(priv->avs_intr_base);
+unmap_base:
+ iounmap(priv->base);
+
+ return ret;
+}
+
+static void brcm_avs_prepare_uninit(struct platform_device *pdev)
+{
+ struct private_data *priv;
+
+ priv = platform_get_drvdata(pdev);
+
+ iounmap(priv->avs_intr_base);
+ iounmap(priv->base);
+}
+
+static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct platform_device *pdev;
+ struct private_data *priv;
+ struct device *dev;
+ int ret;
+
+ pdev = cpufreq_get_driver_data();
+ priv = platform_get_drvdata(pdev);
+ policy->driver_data = priv;
+ dev = &pdev->dev;
+
+ freq_table = brcm_avs_get_freq_table(dev, priv);
+ if (IS_ERR(freq_table)) {
+ ret = PTR_ERR(freq_table);
+ dev_err(dev, "Couldn't determine frequency table (%d).\n", ret);
+ return ret;
+ }
+
+ policy->freq_table = freq_table;
+
+ /* All cores share the same clock and thus the same policy. */
+ cpumask_setall(policy->cpus);
+
+ ret = __issue_avs_command(priv, AVS_CMD_ENABLE, 0, 0, NULL);
+ if (!ret) {
+ unsigned int pstate;
+
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (!ret) {
+ policy->cur = freq_table[pstate].frequency;
+ dev_info(dev, "registered\n");
+ return 0;
+ }
+ }
+
+ dev_err(dev, "couldn't initialize driver (%d)\n", ret);
+
+ return ret;
+}
+
+static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned int pstate;
+
+ if (brcm_avs_get_pstate(priv, &pstate))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%u\n", pstate);
+}
+
+static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
+ pmap.mode);
+}
+
+static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+ struct private_data *priv = policy->driver_data;
+ unsigned int ndiv, pdiv;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+ brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+
+ return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
+ pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
+ mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
+}
+
+static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_voltage(priv->base));
+}
+
+static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return sprintf(buf, "0x%08x\n", brcm_avs_get_frequency(priv->base));
+}
+
+cpufreq_freq_attr_ro(brcm_avs_pstate);
+cpufreq_freq_attr_ro(brcm_avs_mode);
+cpufreq_freq_attr_ro(brcm_avs_pmap);
+cpufreq_freq_attr_ro(brcm_avs_voltage);
+cpufreq_freq_attr_ro(brcm_avs_frequency);
+
+static struct freq_attr *brcm_avs_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &brcm_avs_pstate,
+ &brcm_avs_mode,
+ &brcm_avs_pmap,
+ &brcm_avs_voltage,
+ &brcm_avs_frequency,
+ NULL
+};
+
+static struct cpufreq_driver brcm_avs_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = brcm_avs_target_index,
+ .get = brcm_avs_cpufreq_get,
+ .suspend = brcm_avs_suspend,
+ .resume = brcm_avs_resume,
+ .init = brcm_avs_cpufreq_init,
+ .attr = brcm_avs_cpufreq_attr,
+ .name = BRCM_AVS_CPUFREQ_PREFIX,
+};
+
+static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = brcm_avs_prepare_init(pdev);
+ if (ret)
+ return ret;
+
+ brcm_avs_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ brcm_avs_prepare_uninit(pdev);
+
+ return ret;
+}
+
+static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = cpufreq_unregister_driver(&brcm_avs_driver);
+ WARN_ON(ret);
+
+ brcm_avs_prepare_uninit(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id brcm_avs_cpufreq_match[] = {
+ { .compatible = BRCM_AVS_CPU_DATA },
+ { }
+};
+MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
+
+static struct platform_driver brcm_avs_cpufreq_platdrv = {
+ .driver = {
+ .name = BRCM_AVS_CPUFREQ_NAME,
+ .of_match_table = brcm_avs_cpufreq_match,
+ },
+ .probe = brcm_avs_cpufreq_probe,
+ .remove = brcm_avs_cpufreq_remove,
+};
+module_platform_driver(brcm_avs_cpufreq_platdrv);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom STB AVS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
new file mode 100644
index 000000000..022e35554
--- /dev/null
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPPC (Collaborative Processor Performance Control) driver for
+ * interfacing with the CPUfreq layer and governors. See
+ * cppc_acpi.c for CPPC specific methods.
+ *
+ * (C) Copyright 2014, 2015 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ */
+
+#define pr_fmt(fmt) "CPPC Cpufreq:" fmt
+
+#include <linux/arch_topology.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/dmi.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/sched/types.h>
+
+#include <asm/unaligned.h>
+
+#include <acpi/cppc_acpi.h>
+
+/* Minimum struct length needed for the DMI processor entry we want */
+#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
+
+/* Offset in the DMI processor structure for the max frequency */
+#define DMI_PROCESSOR_MAX_SPEED 0x14
+
+/*
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance, capabilities,
+ * desired performance, level requested etc. Depending on the share_type, not
+ * all CPUs will have an entry in the list.
+ */
+static LIST_HEAD(cpu_data_list);
+
+static bool boost_supported;
+
+struct cppc_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+ u32 oem_revision;
+};
+
+static struct cppc_workaround_oem_info wa_info[] = {
+ {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP07 ",
+ .oem_revision = 0,
+ }, {
+ .oem_id = "HISI ",
+ .oem_table_id = "HIP08 ",
+ .oem_revision = 0,
+ }
+};
+
+static struct cpufreq_driver cppc_cpufreq_driver;
+
+static enum {
+ FIE_UNSET = -1,
+ FIE_ENABLED,
+ FIE_DISABLED
+} fie_disabled = FIE_UNSET;
+
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
+module_param(fie_disabled, int, 0444);
+MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
+
+/* Frequency invariance support */
+struct cppc_freq_invariance {
+ int cpu;
+ struct irq_work irq_work;
+ struct kthread_work work;
+ struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
+ struct cppc_cpudata *cpu_data;
+};
+
+static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
+static struct kthread_worker *kworker_fie;
+
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1);
+
+/**
+ * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
+ * @work: The work item.
+ *
+ * The CPPC driver registers itself with the topology core to provide its own
+ * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
+ * gets called by the scheduler on every tick.
+ *
+ * Note that the arch specific counters have higher priority than CPPC counters,
+ * if available, though the CPPC driver doesn't need to have any special
+ * handling for that.
+ *
+ * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
+ * reach here from hard-irq context), which then schedules a normal work item
+ * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
+ * based on the counter updates since the last tick.
+ */
+static void cppc_scale_freq_workfn(struct kthread_work *work)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ struct cppc_perf_fb_ctrs fb_ctrs = {0};
+ struct cppc_cpudata *cpu_data;
+ unsigned long local_freq_scale;
+ u64 perf;
+
+ cppc_fi = container_of(work, struct cppc_freq_invariance, work);
+ cpu_data = cppc_fi->cpu_data;
+
+ if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
+ pr_warn("%s: failed to read perf counters\n", __func__);
+ return;
+ }
+
+ perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
+ &fb_ctrs);
+ cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
+
+ perf <<= SCHED_CAPACITY_SHIFT;
+ local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
+
+	/* This can happen due to counter overflow */
+ if (unlikely(local_freq_scale > 1024))
+ local_freq_scale = 1024;
+
+ per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
+}
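+
+/*
+ * For illustration: if the counters report a delivered performance of 600
+ * while highest_perf is 1000, the frequency scale becomes
+ * (600 << SCHED_CAPACITY_SHIFT) / 1000 = 614.
+ */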
+
+static void cppc_irq_work(struct irq_work *irq_work)
+{
+ struct cppc_freq_invariance *cppc_fi;
+
+ cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
+ kthread_queue_work(kworker_fie, &cppc_fi->work);
+}
+
+static void cppc_scale_freq_tick(void)
+{
+ struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
+
+ /*
+ * cppc_get_perf_ctrs() can potentially sleep, call that from the right
+ * context.
+ */
+ irq_work_queue(&cppc_fi->irq_work);
+}
+
+static struct scale_freq_data cppc_sftd = {
+ .source = SCALE_FREQ_SOURCE_CPPC,
+ .set_freq_scale = cppc_scale_freq_tick,
+};
+
+static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ int cpu, ret;
+
+ if (fie_disabled)
+ return;
+
+ for_each_cpu(cpu, policy->cpus) {
+ cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+ cppc_fi->cpu = cpu;
+ cppc_fi->cpu_data = policy->driver_data;
+ kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
+ init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
+
+ ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
+ if (ret) {
+ pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+
+ /*
+ * Don't abort if the CPU was offline while the driver
+ * was getting registered.
+ */
+ if (cpu_online(cpu))
+ return;
+ }
+ }
+
+ /* Register for freq-invariance */
+ topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
+}
+
+/*
+ * We free all the resources on policy removal and not on CPU removal, as the
+ * irq-works are per-CPU and the hotplug core takes care of flushing the pending
+ * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread work
+ * fires on another CPU after the concerned CPU is removed, it does no harm.
+ *
+ * We just need to make sure to remove them all on policy->exit().
+ */
+static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+ struct cppc_freq_invariance *cppc_fi;
+ int cpu;
+
+ if (fie_disabled)
+ return;
+
+ /* policy->cpus will be empty here, use related_cpus instead */
+ topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+ irq_work_sync(&cppc_fi->irq_work);
+ kthread_cancel_work_sync(&cppc_fi->work);
+ }
+}
+
+static void __init cppc_freq_invariance_init(void)
+{
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ .sched_policy = SCHED_DEADLINE,
+ .sched_nice = 0,
+ .sched_priority = 0,
+ /*
+ * Fake (unused) bandwidth; workaround to "fix"
+ * priority inheritance.
+ */
+ .sched_runtime = 1000000,
+ .sched_deadline = 10000000,
+ .sched_period = 10000000,
+ };
+ int ret;
+
+ if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
+ fie_disabled = FIE_ENABLED;
+ if (cppc_perf_ctrs_in_pcc()) {
+ pr_info("FIE not enabled on systems with registers in PCC\n");
+ fie_disabled = FIE_DISABLED;
+ }
+ }
+
+ if (fie_disabled)
+ return;
+
+ kworker_fie = kthread_create_worker(0, "cppc_fie");
+ if (IS_ERR(kworker_fie))
+ return;
+
+ ret = sched_setattr_nocheck(kworker_fie->task, &attr);
+ if (ret) {
+ pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
+ ret);
+ kthread_destroy_worker(kworker_fie);
+ return;
+ }
+}
+
+static void cppc_freq_invariance_exit(void)
+{
+ if (fie_disabled)
+ return;
+
+ kthread_destroy_worker(kworker_fie);
+ kworker_fie = NULL;
+}
+
+#else
+static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_freq_invariance_init(void)
+{
+}
+
+static inline void cppc_freq_invariance_exit(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
+
+/* Callback function used to retrieve the max frequency from DMI */
+static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
+{
+ const u8 *dmi_data = (const u8 *)dm;
+ u16 *mhz = (u16 *)private;
+
+ if (dm->type == DMI_ENTRY_PROCESSOR &&
+ dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
+ u16 val = (u16)get_unaligned((const u16 *)
+ (dmi_data + DMI_PROCESSOR_MAX_SPEED));
+ *mhz = val > *mhz ? val : *mhz;
+ }
+}
+
+/* Look up the max frequency in DMI */
+static u64 cppc_get_dmi_max_khz(void)
+{
+ u16 mhz = 0;
+
+ dmi_walk(cppc_find_dmi_mhz, &mhz);
+
+ /*
+ * Real stupid fallback value, just in case there is no
+ * actual value set.
+ */
+ mhz = mhz ? mhz : 1;
+
+ return (1000 * mhz);
+}
+
+/*
+ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+ * use them to convert perf to freq and vice versa. The conversion is
+ * extrapolated as an affine function passing through the 2 points:
+ * - (Lowest perf, Lowest freq)
+ * - (Nominal perf, Nominal freq)
+ */
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
+ unsigned int perf)
+{
+ struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = max_khz;
+ div = caps->highest_perf;
+ }
+
+ retval = offset + div64_u64(perf * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
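+
+/*
+ * For illustration (hypothetical capabilities): with lowest_perf = 10 at
+ * lowest_freq = 500 and nominal_perf = 50 at nominal_freq = 2500, we get
+ * mul = 2000, div = 40 and offset = 0, so a perf value of 30 converts to
+ * 0 + 30 * 2000 / 40 = 1500.
+ */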
+
+static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
+ unsigned int freq)
+{
+ struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = caps->highest_perf;
+ div = max_khz;
+ }
+
+ retval = offset + div64_u64(freq * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
+
+static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_freqs freqs;
+ u32 desired_perf;
+ int ret = 0;
+
+ desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu_data->perf_ctrls.desired_perf)
+ return ret;
+
+ cpu_data->perf_ctrls.desired_perf = desired_perf;
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+ cpufreq_freq_transition_end(policy, &freqs, ret != 0);
+
+ if (ret)
+ pr_debug("Failed to set target on CPU:%d. ret:%d\n",
+ cpu, ret);
+
+ return ret;
+}
+
+static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ unsigned int cpu = policy->cpu;
+ u32 desired_perf;
+ int ret;
+
+ desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ cpu_data->perf_ctrls.desired_perf = desired_perf;
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+
+ if (ret) {
+ pr_debug("Failed to set target on CPU:%d. ret:%d\n",
+ cpu, ret);
+ return 0;
+ }
+
+ return target_freq;
+}
+
+static int cppc_verify_policy(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+/*
+ * The PCC subspace describes the rate at which the platform can accept commands
+ * on the shared PCC channel (including READs, which do not count towards freq
+ * transition requests), so ideally we need to use the PCC values as a fallback
+ * if we don't have a platform specific transition_delay_us.
+ */
+#ifdef CONFIG_ARM64
+#include <asm/cputype.h>
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_num = read_cpuid_part_number();
+
+ switch (implementor) {
+ case ARM_CPU_IMP_QCOM:
+ switch (part_num) {
+ case QCOM_CPU_PART_FALKOR_V1:
+ case QCOM_CPU_PART_FALKOR:
+ return 10000;
+ }
+ }
+ return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#else
+static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
+#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
+
+static DEFINE_PER_CPU(unsigned int, efficiency_class);
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
+
+/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
+#define CPPC_EM_CAP_STEP (20)
+/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
+#define CPPC_EM_COST_STEP (1)
+/* Add a cost gap corresponding to the energy of 4 CPUs. */
+#define CPPC_EM_COST_GAP (4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
+ / CPPC_EM_CAP_STEP)
+
+static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
+{
+ struct cppc_perf_caps *perf_caps;
+ unsigned int min_cap, max_cap;
+ struct cppc_cpudata *cpu_data;
+ int cpu = policy->cpu;
+
+ cpu_data = policy->driver_data;
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu);
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
+ if ((min_cap == 0) || (max_cap < min_cap))
+ return 0;
+ return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
+}
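+
+/*
+ * For illustration: with a (hypothetical) max capacity of 1024, lowest_perf
+ * of 200 and highest_perf of 1000, min_cap = 1024 * 200 / 1000 = 204, so the
+ * function returns 1 + 1024/20 - 204/20 = 1 + 51 - 10 = 42 performance states.
+ */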
+
+/*
+ * The cost is defined as:
+ * cost = power * max_frequency / frequency
+ */
+static inline unsigned long compute_cost(int cpu, int step)
+{
+ return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
+ step * CPPC_EM_COST_STEP;
+}
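+
+/*
+ * For illustration: with SCHED_CAPACITY_SCALE = 1024, CPPC_EM_COST_GAP is
+ * 4 * 1024 * 1 / 20 = 204 (integer division), so a CPU in efficiency class 1
+ * gets a cost of 204 * 1 + step * 1, e.g. 207 for step 3.
+ */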
+
+static int cppc_get_cpu_power(struct device *cpu_dev,
+ unsigned long *power, unsigned long *KHz)
+{
+ unsigned long perf_step, perf_prev, perf, perf_check;
+ unsigned int min_step, max_step, step, step_check;
+ unsigned long prev_freq = *KHz;
+ unsigned int min_cap, max_cap;
+ struct cpufreq_policy *policy;
+
+ struct cppc_perf_caps *perf_caps;
+ struct cppc_cpudata *cpu_data;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ cpu_data = policy->driver_data;
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+ min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+ perf_caps->highest_perf);
+ perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+ max_cap);
+ min_step = min_cap / CPPC_EM_CAP_STEP;
+ max_step = max_cap / CPPC_EM_CAP_STEP;
+
+ perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ step = perf_prev / perf_step;
+
+ if (step > max_step)
+ return -EINVAL;
+
+ if (min_step == max_step) {
+ step = max_step;
+ perf = perf_caps->highest_perf;
+ } else if (step < min_step) {
+ step = min_step;
+ perf = perf_caps->lowest_perf;
+ } else {
+ step++;
+ if (step == max_step)
+ perf = perf_caps->highest_perf;
+ else
+ perf = step * perf_step;
+ }
+
+ *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+ perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ step_check = perf_check / perf_step;
+
+ /*
+	 * To avoid a bad integer approximation, check that the new frequency
+	 * value has increased and that the new frequency converts back to the
+	 * desired step value.
+ */
+ while ((*KHz == prev_freq) || (step_check != step)) {
+ perf++;
+ *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+ perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ step_check = perf_check / perf_step;
+ }
+
+ /*
+	 * With an artificial EM, only the cost value is used. Still, the power
+	 * is populated such that 0 < power < EM_MAX_POWER, which gives the
+	 * artificial performance states more meaning.
+ */
+ *power = compute_cost(cpu_dev->id, step);
+
+ return 0;
+}
+
+static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
+ unsigned long *cost)
+{
+ unsigned long perf_step, perf_prev;
+ struct cppc_perf_caps *perf_caps;
+ struct cpufreq_policy *policy;
+ struct cppc_cpudata *cpu_data;
+ unsigned int max_cap;
+ int step;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ cpu_data = policy->driver_data;
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+
+ perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+ perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+ step = perf_prev / perf_step;
+
+ *cost = compute_cost(cpu_dev->id, step);
+
+ return 0;
+}
+
+static int populate_efficiency_class(void)
+{
+ struct acpi_madt_generic_interrupt *gicc;
+ DECLARE_BITMAP(used_classes, 256) = {};
+ int class, cpu, index;
+
+ for_each_possible_cpu(cpu) {
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ class = gicc->efficiency_class;
+ bitmap_set(used_classes, class, 1);
+ }
+
+ if (bitmap_weight(used_classes, 256) <= 1) {
+ pr_debug("Efficiency classes are all equal (=%d). "
+ "No EM registered", class);
+ return -EINVAL;
+ }
+
+ /*
+ * Squeeze efficiency class values on [0:#efficiency_class-1].
+ * Values are per spec in [0:255].
+ */
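+	/* E.g. MADT classes {0, 3, 7} would be remapped to indices {0, 1, 2}. */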
+ index = 0;
+ for_each_set_bit(class, used_classes, 256) {
+ for_each_possible_cpu(cpu) {
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (gicc->efficiency_class == class)
+ per_cpu(efficiency_class, cpu) = index;
+ }
+ index++;
+ }
+ cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
+
+ return 0;
+}
+
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data;
+ struct em_data_callback em_cb =
+ EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+ cpu_data = policy->driver_data;
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+ get_perf_level_count(policy), &em_cb,
+ cpu_data->shared_cpu_map, 0);
+}
+
+#else
+static int populate_efficiency_class(void)
+{
+ return 0;
+}
+#endif
+
+static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
+{
+ struct cppc_cpudata *cpu_data;
+ int ret;
+
+ cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
+ if (!cpu_data)
+ goto out;
+
+ if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
+ goto free_cpu;
+
+ ret = acpi_get_psd_map(cpu, cpu_data);
+ if (ret) {
+ pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
+ goto free_mask;
+ }
+
+ ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
+ if (ret) {
+ pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
+ goto free_mask;
+ }
+
+ /* Convert the lowest and nominal freq from MHz to KHz */
+ cpu_data->perf_caps.lowest_freq *= 1000;
+ cpu_data->perf_caps.nominal_freq *= 1000;
+
+ list_add(&cpu_data->node, &cpu_data_list);
+
+ return cpu_data;
+
+free_mask:
+ free_cpumask_var(cpu_data->shared_cpu_map);
+free_cpu:
+ kfree(cpu_data);
+out:
+ return NULL;
+}
+
+static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+
+ list_del(&cpu_data->node);
+ free_cpumask_var(cpu_data->shared_cpu_map);
+ kfree(cpu_data);
+ policy->driver_data = NULL;
+}
+
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct cppc_cpudata *cpu_data;
+ struct cppc_perf_caps *caps;
+ int ret;
+
+ cpu_data = cppc_cpufreq_get_cpu_data(cpu);
+ if (!cpu_data) {
+ pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
+ return -ENODEV;
+ }
+ caps = &cpu_data->perf_caps;
+ policy->driver_data = cpu_data;
+
+ /*
+ * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+ * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+ */
+ policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->lowest_nonlinear_perf);
+ policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->nominal_perf);
+
+ /*
+	 * Set cpuinfo.min_freq to the lowest perf to make the full range of
+	 * performance available if userspace wants to use any perf between
+	 * lowest and lowest nonlinear perf.
+ */
+ policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->lowest_perf);
+ policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->nominal_perf);
+
+ policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
+ policy->shared_type = cpu_data->shared_type;
+
+ switch (policy->shared_type) {
+ case CPUFREQ_SHARED_TYPE_HW:
+ case CPUFREQ_SHARED_TYPE_NONE:
+ /* Nothing to be done - we'll have a policy for each CPU */
+ break;
+ case CPUFREQ_SHARED_TYPE_ANY:
+ /*
+ * All CPUs in the domain will share a policy and all cpufreq
+ * operations will use a single cppc_cpudata structure stored
+ * in policy->driver_data.
+ */
+ cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
+ break;
+ default:
+ pr_debug("Unsupported CPU co-ord type: %d\n",
+ policy->shared_type);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ policy->fast_switch_possible = cppc_allow_fast_switch();
+ policy->dvfs_possible_from_any_cpu = true;
+
+ /*
+ * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
+ * is supported.
+ */
+ if (caps->highest_perf > caps->nominal_perf)
+ boost_supported = true;
+
+ /* Set policy->cur to max now. The governors will adjust later. */
+ policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+ cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
+
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+ if (ret) {
+ pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+ caps->highest_perf, cpu, ret);
+ goto out;
+ }
+
+ cppc_cpufreq_cpu_fie_init(policy);
+ return 0;
+
+out:
+ cppc_cpufreq_put_cpu_data(policy);
+ return ret;
+}
+
+static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ unsigned int cpu = policy->cpu;
+ int ret;
+
+ cppc_cpufreq_cpu_fie_exit(policy);
+
+ cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
+
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+ if (ret)
+ pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+ caps->lowest_perf, cpu, ret);
+
+ cppc_cpufreq_put_cpu_data(policy);
+ return 0;
+}
+
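+/*
+ * The CPPC feedback counters may be implemented as 32-bit registers. If the
+ * newer value has not increased and the older one still fits in 32 bits,
+ * assume the counter is 32 bits wide and has wrapped, so compute the delta
+ * modulo 2^32.
+ */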
+static inline u64 get_delta(u64 t1, u64 t0)
+{
+ if (t1 > t0 || t0 > ~(u32)0)
+ return t1 - t0;
+
+ return (u32)t1 - (u32)t0;
+}
+
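+/*
+ * Delivered performance is derived from the feedback counters as
+ * reference_perf * delta_delivered / delta_reference; e.g. if the delivered
+ * counter advanced twice as much as the reference counter, the CPU ran at
+ * twice the reference performance level.
+ */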
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+ u64 delta_reference, delta_delivered;
+ u64 reference_perf;
+
+ reference_perf = fb_ctrs_t0->reference_perf;
+
+ delta_reference = get_delta(fb_ctrs_t1->reference,
+ fb_ctrs_t0->reference);
+ delta_delivered = get_delta(fb_ctrs_t1->delivered,
+ fb_ctrs_t0->delivered);
+
+	/* Check to avoid divide-by-zero and an invalid delivered_perf */
+ if (!delta_reference || !delta_delivered)
+ return cpu_data->perf_ctrls.desired_perf;
+
+ return (reference_perf * delta_delivered) / delta_reference;
+}
+
+static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ u64 delivered_perf;
+ int ret;
+
+ cpufreq_cpu_put(policy);
+
+ ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
+ if (ret)
+ return ret;
+
+ delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
+ &fb_ctrs_t1);
+
+ return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+}
+
+static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+ int ret;
+
+ if (!boost_supported) {
+ pr_err("BOOST not supported by CPU or firmware\n");
+ return -EINVAL;
+ }
+
+ if (state)
+ policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->highest_perf);
+ else
+ policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+ caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+
+ return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
+}
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+static struct freq_attr *cppc_cpufreq_attr[] = {
+ &freqdomain_cpus,
+ NULL,
+};
+
+static struct cpufreq_driver cppc_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = cppc_verify_policy,
+ .target = cppc_cpufreq_set_target,
+ .get = cppc_cpufreq_get_rate,
+ .fast_switch = cppc_cpufreq_fast_switch,
+ .init = cppc_cpufreq_cpu_init,
+ .exit = cppc_cpufreq_cpu_exit,
+ .set_boost = cppc_cpufreq_set_boost,
+ .attr = cppc_cpufreq_attr,
+ .name = "cppc_cpufreq",
+};
+
+/*
+ * HiSilicon platforms do not support the delivered and reference performance
+ * counters. They calculate the performance using a platform-specific
+ * mechanism and reuse the desired performance register to store the real
+ * performance calculated by the platform.
+ */
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ u64 desired_perf;
+ int ret;
+
+ cpufreq_cpu_put(policy);
+
+ ret = cppc_get_desired_perf(cpu, &desired_perf);
+ if (ret < 0)
+ return -EIO;
+
+ return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
+}
+
+static void cppc_check_hisi_workaround(void)
+{
+ struct acpi_table_header *tbl;
+ acpi_status status = AE_OK;
+ int i;
+
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
+ if (ACPI_FAILURE(status) || !tbl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
+ !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ wa_info[i].oem_revision == tbl->oem_revision) {
+ /* Overwrite the get() callback */
+ cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
+ fie_disabled = FIE_DISABLED;
+ break;
+ }
+ }
+
+ acpi_put_table(tbl);
+}
+
+static int __init cppc_cpufreq_init(void)
+{
+ int ret;
+
+ if (!acpi_cpc_valid())
+ return -ENODEV;
+
+ cppc_check_hisi_workaround();
+ cppc_freq_invariance_init();
+ populate_efficiency_class();
+
+ ret = cpufreq_register_driver(&cppc_cpufreq_driver);
+ if (ret)
+ cppc_freq_invariance_exit();
+
+ return ret;
+}
+
+static inline void free_cpu_data(void)
+{
+ struct cppc_cpudata *iter, *tmp;
+
+ list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
+ free_cpumask_var(iter->shared_cpu_map);
+ list_del(&iter->node);
+ kfree(iter);
+ }
+
+}
+
+static void __exit cppc_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cppc_cpufreq_driver);
+ cppc_freq_invariance_exit();
+
+ free_cpu_data();
+}
+
+module_exit(cppc_cpufreq_exit);
+MODULE_AUTHOR("Ashwin Chaugule");
+MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
+MODULE_LICENSE("GPL");
+
+late_initcall(cppc_cpufreq_init);
+
+static const struct acpi_device_id cppc_acpi_ids[] __used = {
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
new file mode 100644
index 000000000..8514bb62d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "cpufreq-dt.h"
+
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id allowlist[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "apm,xgene-shadowcat", },
+
+ { .compatible = "arm,integrator-ap", },
+ { .compatible = "arm,integrator-cp", },
+
+ { .compatible = "hisilicon,hi3660", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+
+ { .compatible = "marvell,berlin", },
+ { .compatible = "marvell,pxa250", },
+ { .compatible = "marvell,pxa270", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7742", },
+ { .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7744", },
+ { .compatible = "renesas,r8a7745", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7792", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399",
+ .data = &(struct cpufreq_dt_platform_data)
+ { .have_governor_per_policy = true, },
+ },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
+
+ { }
+};
+
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "allwinner,sun50i-h6", },
+
+ { .compatible = "arm,vexpress", },
+
+ { .compatible = "calxeda,highbank", },
+ { .compatible = "calxeda,ecx-2000", },
+
+ { .compatible = "fsl,imx7ulp", },
+ { .compatible = "fsl,imx7d", },
+ { .compatible = "fsl,imx7s", },
+ { .compatible = "fsl,imx8mq", },
+ { .compatible = "fsl,imx8mm", },
+ { .compatible = "fsl,imx8mn", },
+ { .compatible = "fsl,imx8mp", },
+
+ { .compatible = "marvell,armadaxp", },
+
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt8167", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+ { .compatible = "mediatek,mt8183", },
+ { .compatible = "mediatek,mt8186", },
+ { .compatible = "mediatek,mt8365", },
+ { .compatible = "mediatek,mt8516", },
+
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
+ { .compatible = "nvidia,tegra234", },
+
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+ { .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,sa8155p" },
+ { .compatible = "qcom,sa8540p" },
+ { .compatible = "qcom,sc7180", },
+ { .compatible = "qcom,sc7280", },
+ { .compatible = "qcom,sc8180x", },
+ { .compatible = "qcom,sc8280xp", },
+ { .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6115", },
+ { .compatible = "qcom,sm6350", },
+ { .compatible = "qcom,sm6375", },
+ { .compatible = "qcom,sm8150", },
+ { .compatible = "qcom,sm8250", },
+ { .compatible = "qcom,sm8350", },
+
+ { .compatible = "st,stih407", },
+ { .compatible = "st,stih410", },
+ { .compatible = "st,stih418", },
+
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,am43", },
+ { .compatible = "ti,dra7", },
+ { .compatible = "ti,omap3", },
+
+ { .compatible = "qcom,ipq8064", },
+ { .compatible = "qcom,apq8064", },
+ { .compatible = "qcom,msm8974", },
+ { .compatible = "qcom,msm8960", },
+
+ { }
+};
+
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_property_present(np, "operating-points-v2"))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
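+/*
+ * Create the "cpufreq-dt" platform device when the machine root node is in
+ * the allowlist, or when cpu0 carries an "operating-points-v2" property and
+ * the machine is not blocklisted.
+ */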
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ const void *data = NULL;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(allowlist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
+ goto create_pdev;
+
+ of_node_put(np);
+ return -ENODEV;
+
+create_pdev:
+ of_node_put(np);
+ return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
+ -1, data,
+ sizeof(struct cpufreq_dt_platform_data)));
+}
+core_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
new file mode 100644
index 000000000..4aec4b2a5
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * Copyright (C) 2014 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "cpufreq-dt.h"
+
+struct private_data {
+ struct list_head node;
+
+ cpumask_var_t cpus;
+ struct device *cpu_dev;
+ struct cpufreq_frequency_table *freq_table;
+ bool have_static_opps;
+ int opp_token;
+};
+
+static LIST_HEAD(priv_list);
+
+static struct freq_attr *cpufreq_dt_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL, /* Extra space for boost-attr if required */
+ NULL,
+};
+
+static struct private_data *cpufreq_dt_find_data(int cpu)
+{
+ struct private_data *priv;
+
+ list_for_each_entry(priv, &priv_list, node) {
+ if (cpumask_test_cpu(cpu, priv->cpus))
+ return priv;
+ }
+
+ return NULL;
+}
+
+static int set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned long freq = policy->freq_table[index].frequency;
+
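+	/* freq_table entries are in kHz while dev_pm_opp_set_rate() takes Hz */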
+ return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
+}
+
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+ struct device_node *np;
+ struct property *pp;
+ int cpu = dev->id;
+ const char *name = NULL;
+
+ np = of_node_get(dev->of_node);
+
+ /* This must be valid for sure */
+ if (WARN_ON(!np))
+ return NULL;
+
+ /* Try "cpu0" for older DTs */
+ if (!cpu) {
+ pp = of_find_property(np, "cpu0-supply", NULL);
+ if (pp) {
+ name = "cpu0";
+ goto node_put;
+ }
+ }
+
+ pp = of_find_property(np, "cpu-supply", NULL);
+ if (pp) {
+ name = "cpu";
+ goto node_put;
+ }
+
+ dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+ of_node_put(np);
+ return name;
+}
+
+static int cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct private_data *priv;
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ unsigned int transition_latency;
+ int ret;
+
+ priv = cpufreq_dt_find_data(policy->cpu);
+ if (!priv) {
+ pr_err("failed to find data for cpu%d\n", policy->cpu);
+ return -ENODEV;
+ }
+ cpu_dev = priv->cpu_dev;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+ return ret;
+ }
+
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
+
+ cpumask_copy(policy->cpus, priv->cpus);
+ policy->driver_data = priv;
+ policy->clk = cpu_clk;
+ policy->freq_table = priv->freq_table;
+ policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
+
+ /* Support turbo/boost mode */
+ if (policy_has_boost_freq(policy)) {
+ /* This gets disabled by core on driver unregister */
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ goto out_clk_put;
+ cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ }
+
+ return 0;
+
+out_clk_put:
+ clk_put(cpu_clk);
+
+ return ret;
+}
+
+static int cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* We did light-weight tear down earlier, nothing to do here */
+ return 0;
+}
+
+static int cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /*
+ * Preserve policy->driver_data and don't free resources on light-weight
+ * tear down.
+ */
+ return 0;
+}
+
+static int cpufreq_exit(struct cpufreq_policy *policy)
+{
+ clk_put(policy->clk);
+ return 0;
+}
+
+static struct cpufreq_driver dt_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = set_target,
+ .get = cpufreq_generic_get,
+ .init = cpufreq_init,
+ .exit = cpufreq_exit,
+ .online = cpufreq_online,
+ .offline = cpufreq_offline,
+ .register_em = cpufreq_register_em_with_opp,
+ .name = "cpufreq-dt",
+ .attr = cpufreq_dt_attr,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static int dt_cpufreq_early_init(struct device *dev, int cpu)
+{
+ struct private_data *priv;
+ struct device *cpu_dev;
+ bool fallback = false;
+ const char *reg_name[] = { NULL, NULL };
+ int ret;
+
+ /* Check if this CPU is already covered by some other policy */
+ if (cpufreq_dt_find_data(cpu))
+ return 0;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -EPROBE_DEFER;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(cpu, priv->cpus);
+ priv->cpu_dev = cpu_dev;
+
+ /*
+ * OPP layer will be taking care of regulators now, but it needs to know
+ * the name of the regulator first.
+ */
+ reg_name[0] = find_supply_name(cpu_dev);
+ if (reg_name[0]) {
+ priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name);
+ if (priv->opp_token < 0) {
+ ret = dev_err_probe(cpu_dev, priv->opp_token,
+ "failed to set regulators\n");
+ goto free_cpumask;
+ }
+ }
+
+ /* Get OPP-sharing information from "operating-points-v2" bindings */
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
+ if (ret) {
+ if (ret != -ENOENT)
+ goto out;
+
+ /*
+		 * operating-points-v2 is not supported; for backward
+		 * compatibility, fall back to all CPUs sharing the OPP table
+		 * if the platform hasn't set the sharing CPUs itself.
+ */
+ if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
+ fallback = true;
+ }
+
+ /*
+ * Initialize OPP tables for all priv->cpus. They will be shared by
+ * all CPUs which have marked their CPUs shared with OPP bindings.
+ *
+ * For platforms not using operating-points-v2 bindings, we do this
+ * before updating priv->cpus. Otherwise, we will end up creating
+ * duplicate OPPs for the CPUs.
+ *
+	 * OPPs might be populated at runtime, so don't fail on an error here unless
+ * it is -EPROBE_DEFER.
+ */
+ ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
+ if (!ret) {
+ priv->have_static_opps = true;
+ } else if (ret == -EPROBE_DEFER) {
+ goto out;
+ }
+
+ /*
+ * The OPP table must be initialized, statically or dynamically, by this
+ * point.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_err(cpu_dev, "OPP table can't be empty\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (fallback) {
+ cpumask_setall(priv->cpus);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out;
+ }
+
+ list_add(&priv->node, &priv_list);
+ return 0;
+
+out:
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+ dev_pm_opp_put_regulators(priv->opp_token);
+free_cpumask:
+ free_cpumask_var(priv->cpus);
+ return ret;
+}
+
+static void dt_cpufreq_release(void)
+{
+ struct private_data *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, &priv_list, node) {
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(priv->cpus);
+ dev_pm_opp_put_regulators(priv->opp_token);
+ free_cpumask_var(priv->cpus);
+ list_del(&priv->node);
+ }
+}
+
+static int dt_cpufreq_probe(struct platform_device *pdev)
+{
+ struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
+ int ret, cpu;
+
+ /* Request resources early so we can return in case of -EPROBE_DEFER */
+ for_each_possible_cpu(cpu) {
+ ret = dt_cpufreq_early_init(&pdev->dev, cpu);
+ if (ret)
+ goto err;
+ }
+
+ if (data) {
+ if (data->have_governor_per_policy)
+ dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+ dt_cpufreq_driver.resume = data->resume;
+ if (data->suspend)
+ dt_cpufreq_driver.suspend = data->suspend;
+ if (data->get_intermediate) {
+ dt_cpufreq_driver.target_intermediate = data->target_intermediate;
+ dt_cpufreq_driver.get_intermediate = data->get_intermediate;
+ }
+ }
+
+ ret = cpufreq_register_driver(&dt_cpufreq_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "failed register driver: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ dt_cpufreq_release();
+ return ret;
+}
+
+static int dt_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&dt_cpufreq_driver);
+ dt_cpufreq_release();
+ return 0;
+}
+
+static struct platform_driver dt_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-dt",
+ },
+ .probe = dt_cpufreq_probe,
+ .remove = dt_cpufreq_remove,
+};
+module_platform_driver(dt_cpufreq_platdrv);
+
+MODULE_ALIAS("platform:cpufreq-dt");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
new file mode 100644
index 000000000..28c8af7ec
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Linaro
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#ifndef __CPUFREQ_DT_H__
+#define __CPUFREQ_DT_H__
+
+#include <linux/types.h>
+
+struct cpufreq_policy;
+
+struct cpufreq_dt_platform_data {
+ bool have_governor_per_policy;
+
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*suspend)(struct cpufreq_policy *policy);
+ int (*resume)(struct cpufreq_policy *policy);
+};
+
+#endif /* __CPUFREQ_DT_H__ */
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
new file mode 100644
index 000000000..f7a7bcf6f
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net>
+ *
+ * Based upon reverse engineered information
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#define NFORCE2_XTAL 25
+#define NFORCE2_BOOTFSB 0x48
+#define NFORCE2_PLLENABLE 0xa8
+#define NFORCE2_PLLREG 0xa4
+#define NFORCE2_PLLADR 0xa0
+#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div)
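+/*
+ * E.g. NFORCE2_PLL(12, 2) encodes multiplier 12 and divider 2, i.e. an FSB of
+ * NFORCE2_XTAL * 12 / 2 = 150 MHz (see nforce2_calc_fsb()).
+ */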
+
+#define NFORCE2_MIN_FSB 50
+#define NFORCE2_SAFE_DISTANCE 50
+
+/* Delay in ms between FSB changes */
+/* #define NFORCE2_DELAY 10 */
+
+/*
+ * nforce2_chipset:
+ * FSB is changed using the chipset
+ */
+static struct pci_dev *nforce2_dev;
+
+/* fid:
+ * multiplier * 10
+ */
+static int fid;
+
+/* min_fsb, max_fsb:
+ * minimum and maximum FSB (= FSB at boot time)
+ */
+static int min_fsb;
+static int max_fsb;
+
+MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
+MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver");
+MODULE_LICENSE("GPL");
+
+module_param(fid, int, 0444);
+module_param(min_fsb, int, 0444);
+
+MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
+MODULE_PARM_DESC(min_fsb,
+ "Minimum FSB to use, if not defined: current FSB - 50");
+
+/**
+ * nforce2_calc_fsb - calculate FSB
+ * @pll: PLL value
+ *
+ * Calculates FSB from PLL value
+ */
+static int nforce2_calc_fsb(int pll)
+{
+ unsigned char mul, div;
+
+ mul = (pll >> 8) & 0xff;
+ div = pll & 0xff;
+
+ if (div > 0)
+ return NFORCE2_XTAL * mul / div;
+
+ return 0;
+}
+
+/**
+ * nforce2_calc_pll - calculate PLL value
+ * @fsb: FSB
+ *
+ * Calculate PLL value for given FSB
+ */
+static int nforce2_calc_pll(unsigned int fsb)
+{
+ unsigned char xmul, xdiv;
+ unsigned char mul = 0, div = 0;
+ int tried = 0;
+
+ /* Try to calculate multiplier and divider up to 4 times */
+ while (((mul == 0) || (div == 0)) && (tried <= 3)) {
+ for (xdiv = 2; xdiv <= 0x80; xdiv++)
+ for (xmul = 1; xmul <= 0xfe; xmul++)
+ if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) ==
+ fsb + tried) {
+ mul = xmul;
+ div = xdiv;
+ }
+ tried++;
+ }
+
+ if ((mul == 0) || (div == 0))
+ return -1;
+
+ return NFORCE2_PLL(mul, div);
+}
+
+/**
+ * nforce2_write_pll - write PLL value to chipset
+ * @pll: PLL value
+ *
+ * Writes new FSB PLL value to chipset
+ */
+static void nforce2_write_pll(int pll)
+{
+ int temp;
+
+ /* Set the pll addr. to 0x00 */
+ pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0);
+
+ /* Now write the value in all 64 registers */
+ for (temp = 0; temp <= 0x3f; temp++)
+ pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
+}
+
+/**
+ * nforce2_fsb_read - Read FSB
+ *
+ * Read FSB from chipset
+ * If bootfsb != 0, return FSB at boot-time
+ */
+static unsigned int nforce2_fsb_read(int bootfsb)
+{
+ struct pci_dev *nforce2_sub5;
+ u32 fsb, temp = 0;
+
+ /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
+ nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+ if (!nforce2_sub5)
+ return 0;
+
+ pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
+ fsb /= 1000000;
+
+ /* Check if PLL register is already set */
+ pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
+
+ if (bootfsb || !temp)
+ return fsb;
+
+ /* Use PLL register FSB value */
+ pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp);
+ fsb = nforce2_calc_fsb(temp);
+
+ return fsb;
+}
+
+/**
+ * nforce2_set_fsb - set new FSB
+ * @fsb: New FSB
+ *
+ * Sets new FSB
+ */
+static int nforce2_set_fsb(unsigned int fsb)
+{
+ u32 temp = 0;
+ unsigned int tfsb;
+ int diff;
+ int pll = 0;
+
+ if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
+ pr_err("FSB %d is out of range!\n", fsb);
+ return -EINVAL;
+ }
+
+ tfsb = nforce2_fsb_read(0);
+ if (!tfsb) {
+ pr_err("Error while reading the FSB\n");
+ return -EINVAL;
+ }
+
+ /* First write? Then set actual value */
+ pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
+ if (!temp) {
+ pll = nforce2_calc_pll(tfsb);
+
+ if (pll < 0)
+ return -EINVAL;
+
+ nforce2_write_pll(pll);
+ }
+
+ /* Enable write access */
+ temp = 0x01;
+ pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp);
+
+ diff = tfsb - fsb;
+
+ if (!diff)
+ return 0;
+
+ while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) {
+ if (diff < 0)
+ tfsb++;
+ else
+ tfsb--;
+
+ /* Calculate the PLL reg. value */
+ pll = nforce2_calc_pll(tfsb);
+ if (pll == -1)
+ return -EINVAL;
+
+ nforce2_write_pll(pll);
+#ifdef NFORCE2_DELAY
+ mdelay(NFORCE2_DELAY);
+#endif
+ }
+
+ temp = 0x40;
+ pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp);
+
+ return 0;
+}
+
+/**
+ * nforce2_get - get the CPU frequency
+ * @cpu: CPU number
+ *
+ * Returns the CPU frequency
+ */
+static unsigned int nforce2_get(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
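+	/* fid is the CPU multiplier scaled by 10, so FSB [MHz] * fid * 100 gives kHz */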
+ return nforce2_fsb_read(0) * fid * 100;
+}
+
+/**
+ * nforce2_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency
+ * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int nforce2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+/* unsigned long flags; */
+ struct cpufreq_freqs freqs;
+ unsigned int target_fsb;
+
+ if ((target_freq > policy->max) || (target_freq < policy->min))
+ return -EINVAL;
+
+ target_fsb = target_freq / (fid * 100);
+
+ freqs.old = nforce2_get(policy->cpu);
+ freqs.new = target_fsb * fid * 100;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
+ freqs.old, freqs.new);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ /* Disable IRQs */
+ /* local_irq_save(flags); */
+
+ if (nforce2_set_fsb(target_fsb) < 0)
+ pr_err("Changing FSB to %d failed\n", target_fsb);
+ else
+ pr_debug("Changed FSB successfully to %d\n",
+ target_fsb);
+
+ /* Enable IRQs */
+ /* local_irq_restore(flags); */
+
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ return 0;
+}
+
+/**
+ * nforce2_verify - verifies a new CPUFreq policy
+ * @policy: new policy
+ */
+static int nforce2_verify(struct cpufreq_policy_data *policy)
+{
+ unsigned int fsb_pol_max;
+
+ fsb_pol_max = policy->max / (fid * 100);
+
+ if (policy->min < (fsb_pol_max * fid * 100))
+ policy->max = (fsb_pol_max + 1) * fid * 100;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static int nforce2_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int fsb;
+ unsigned int rfid;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* Get current FSB */
+ fsb = nforce2_fsb_read(0);
+
+ if (!fsb)
+ return -EIO;
+
+ /* FIX: Get FID from CPU */
+ if (!fid) {
+ if (!cpu_khz) {
+ pr_warn("cpu_khz not set, can't calculate multiplier!\n");
+ return -ENODEV;
+ }
+
+ fid = cpu_khz / (fsb * 100);
+ rfid = fid % 5;
+
+ if (rfid) {
+ if (rfid > 2)
+ fid += 5 - rfid;
+ else
+ fid -= rfid;
+ }
+ }
+
+ pr_info("FSB currently at %i MHz, FID %d.%d\n",
+ fsb, fid / 10, fid % 10);
+
+ /* Set maximum FSB to FSB at boot time */
+ max_fsb = nforce2_fsb_read(1);
+
+ if (!max_fsb)
+ return -EIO;
+
+ if (!min_fsb)
+ min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE;
+
+ if (min_fsb < NFORCE2_MIN_FSB)
+ min_fsb = NFORCE2_MIN_FSB;
+
+ /* cpuinfo and default policy values */
+ policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
+ policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
+
+ return 0;
+}
+
+static int nforce2_cpu_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver nforce2_driver = {
+ .name = "nforce2",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .verify = nforce2_verify,
+ .target = nforce2_target,
+ .get = nforce2_get,
+ .init = nforce2_cpu_init,
+ .exit = nforce2_cpu_exit,
+};
+
+#ifdef MODULE
+static const struct pci_device_id nforce2_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2 },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, nforce2_ids);
+#endif
+
+/**
+ * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
+ *
+ * Detects nForce2 A2 and C1 stepping
+ *
+ */
+static int nforce2_detect_chipset(void)
+{
+ nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+
+ if (nforce2_dev == NULL)
+ return -ENODEV;
+
+ pr_info("Detected nForce2 chipset revision %X\n",
+ nforce2_dev->revision);
+ pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n");
+
+ return 0;
+}
+
+/**
+ * nforce2_init - initializes the nForce2 CPUFreq driver
+ *
+ * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
+ * devices, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init nforce2_init(void)
+{
+ /* TODO: do we need to detect the processor? */
+
+ /* detect chipset */
+ if (nforce2_detect_chipset()) {
+ pr_info("No nForce2 chipset\n");
+ return -ENODEV;
+ }
+
+ return cpufreq_register_driver(&nforce2_driver);
+}
+
+/**
+ * nforce2_exit - unregisters cpufreq module
+ *
+ * Unregisters nForce2 FSB change support.
+ */
+static void __exit nforce2_exit(void)
+{
+ cpufreq_unregister_driver(&nforce2_driver);
+}
+
+module_init(nforce2_init);
+module_exit(nforce2_exit);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
new file mode 100644
index 000000000..c8912756f
--- /dev/null
+++ b/drivers/cpufreq/cpufreq.c
@@ -0,0 +1,2953 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/cpufreq/cpufreq.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
+ * Added handling for CPU hotplug
+ * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
+ * Fix handling for CPU hotplug -- affected CPUs
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu_cooling.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/tick.h>
+#include <linux/units.h>
+#include <trace/events/power.h>
+
+static LIST_HEAD(cpufreq_policy_list);
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active) \
+ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
+ if ((__active) == !policy_is_inactive(__policy))
+
+#define for_each_active_policy(__policy) \
+ for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy) \
+ for_each_suitable_policy(__policy, false)
+
+/* Iterate over governors */
+static LIST_HEAD(cpufreq_governor_list);
+#define for_each_governor(__governor) \
+ list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
+
+static char default_governor[CPUFREQ_NAME_LEN];
+
+/*
+ * The "cpufreq driver" - the arch- or hardware-dependent low
+ * level driver of CPUFreq support, and its spinlock. This lock
+ * also protects the cpufreq_cpu_data array.
+ */
+static struct cpufreq_driver *cpufreq_driver;
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
+
+static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
+bool cpufreq_supports_freq_invariance(void)
+{
+ return static_branch_likely(&cpufreq_freq_invariance);
+}
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
+
+static inline bool has_target(void)
+{
+ return cpufreq_driver->target_index || cpufreq_driver->target;
+}
+
+/* internal prototypes */
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_init_governor(struct cpufreq_policy *policy);
+static void cpufreq_exit_governor(struct cpufreq_policy *policy);
+static void cpufreq_governor_limits(struct cpufreq_policy *policy);
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol);
+
+/*
+ * Two notifier lists: the "policy" list is involved in the
+ * validation process for a new CPU frequency policy; the
+ * "transition" list for kernel code that needs to handle
+ * changes to devices when the CPU clock speed changes.
+ * The mutex locks both lists.
+ */
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
+
+static int off __read_mostly;
+static int cpufreq_disabled(void)
+{
+ return off;
+}
+void disable_cpufreq(void)
+{
+ off = 1;
+}
+static DEFINE_MUTEX(cpufreq_governor_mutex);
+
+bool have_governor_per_policy(void)
+{
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
+}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+static struct kobject *cpufreq_global_kobject;
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ struct kernel_cpustat kcpustat;
+ u64 cur_wall_time;
+ u64 idle_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
+
+ kcpustat_cpu_fetch(&kcpustat, cpu);
+
+ busy_time = kcpustat.cpustat[CPUTIME_USER];
+ busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat.cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat.cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat.cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
+
+ return div_u64(idle_time, NSEC_PER_USEC);
+}
+
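+/*
+ * Prefer the NO_HZ idle-time accounting from get_cpu_idle_time_us(); if it is
+ * unavailable (-1ULL), fall back to the jiffy-based estimate above. Unless
+ * io_busy is set, time spent waiting for I/O is also counted as idle.
+ */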
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the freq table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+void cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
+{
+ policy->freq_table = table;
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ /*
+ * The driver only supports the SMP configuration where all processors
+	 * share the clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+
+ if (!policy || IS_ERR(policy->clk)) {
+ pr_err("%s: No %s associated to cpu: %d\n",
+ __func__, policy ? "clk" : "policy", cpu);
+ return 0;
+ }
+
+ return clk_get_rate(policy->clk) / 1000;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+
+/**
+ * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
+ * the kobject reference counter of that policy. Return a valid policy on
+ * success or NULL on failure.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_put() to balance its kobject reference counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = NULL;
+ unsigned long flags;
+
+ if (WARN_ON(cpu >= nr_cpu_ids))
+ return NULL;
+
+ /* get the cpufreq driver */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver) {
+ /* get the CPU */
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (policy)
+ kobject_get(&policy->kobj);
+ }
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ return policy;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+
+/**
+ * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
+ * @policy: cpufreq policy returned by cpufreq_cpu_get().
+ */
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
+{
+ kobject_put(&policy->kobj);
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+ if (WARN_ON(!policy))
+ return;
+
+ lockdep_assert_held(&policy->rwsem);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return NULL;
+
+ down_write(&policy->rwsem);
+
+ if (policy_is_inactive(policy)) {
+ cpufreq_cpu_release(policy);
+ return NULL;
+ }
+
+ return policy;
+}
+
+/*********************************************************************
+ * EXTERNALLY AFFECTING FREQUENCY CHANGES *
+ *********************************************************************/
+
+/**
+ * adjust_jiffies - Adjust the system "loops_per_jiffy".
+ * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
+ * @ci: Frequency change information.
+ *
+ * This function alters the system "loops_per_jiffy" for the clock
+ * speed change. Note that loops_per_jiffy cannot be updated on SMP
+ * systems as each CPU might be scaled differently. So, use the arch
+ * per-CPU loops_per_jiffy value wherever possible.
+ */
+static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
+{
+#ifndef CONFIG_SMP
+ static unsigned long l_p_j_ref;
+ static unsigned int l_p_j_ref_freq;
+
+ if (ci->flags & CPUFREQ_CONST_LOOPS)
+ return;
+
+ if (!l_p_j_ref_freq) {
+ l_p_j_ref = loops_per_jiffy;
+ l_p_j_ref_freq = ci->old;
+ pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+ l_p_j_ref, l_p_j_ref_freq);
+ }
+ if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
+ loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
+ ci->new);
+ pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+ loops_per_jiffy, ci->new);
+ }
+#endif
+}
+
+/**
+ * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
+ * @policy: cpufreq policy the frequency transition applies to.
+ * @freqs: contains details of the frequency update.
+ * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
+ *
+ * This function calls the transition notifiers and adjust_jiffies().
+ *
+ * It is called twice on all CPU frequency changes that have external effects.
+ */
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs,
+ unsigned int state)
+{
+ int cpu;
+
+ BUG_ON(irqs_disabled());
+
+ if (cpufreq_disabled())
+ return;
+
+ freqs->policy = policy;
+ freqs->flags = cpufreq_driver->flags;
+ pr_debug("notification %u of frequency transition to %u kHz\n",
+ state, freqs->new);
+
+ switch (state) {
+ case CPUFREQ_PRECHANGE:
+ /*
+ * Detect if the driver reported a value as "old frequency"
+ * which is not equal to what the cpufreq core thinks is
+ * "old frequency".
+ */
+ if (policy->cur && policy->cur != freqs->old) {
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
+ freqs->old = policy->cur;
+ }
+
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
+
+ adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
+ break;
+
+ case CPUFREQ_POSTCHANGE:
+ adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
+ pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
+ cpumask_pr_args(policy->cpus));
+
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freqs->new, cpu);
+
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
+
+ cpufreq_stats_record_transition(policy, freqs->new);
+ policy->cur = freqs->new;
+ }
+}
+
+/* Do post notifications when there are chances that transition has failed */
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+ if (!transition_failed)
+ return;
+
+ swap(freqs->old, freqs->new);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs)
+{
+
+ /*
+ * Catch double invocations of _begin() which lead to self-deadlock.
+ * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
+ * doesn't invoke _begin() on their behalf, and hence the chances of
+ * double invocations are very low. Moreover, there are scenarios
+ * where these checks can emit false-positive warnings in these
+ * drivers; so we avoid that by skipping them altogether.
+ */
+ WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
+ && current == policy->transition_task);
+
+wait:
+ wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+ spin_lock(&policy->transition_lock);
+
+ if (unlikely(policy->transition_ongoing)) {
+ spin_unlock(&policy->transition_lock);
+ goto wait;
+ }
+
+ policy->transition_ongoing = true;
+ policy->transition_task = current;
+
+ spin_unlock(&policy->transition_lock);
+
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ if (WARN_ON(!policy->transition_ongoing))
+ return;
+
+ cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+ arch_set_freq_scale(policy->related_cpus,
+ policy->cur,
+ policy->cpuinfo.max_freq);
+
+ spin_lock(&policy->transition_lock);
+ policy->transition_ongoing = false;
+ policy->transition_task = NULL;
+ spin_unlock(&policy->transition_lock);
+
+ wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
+
+/*
+ * Fast frequency switching status count. Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+ struct notifier_block *nb;
+
+ pr_info("Registered transition notifiers:\n");
+
+ mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+ for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+ pr_info("%pS\n", nb->notifier_call);
+
+ mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers. Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+ lockdep_assert_held(&policy->rwsem);
+
+ if (!policy->fast_switch_possible)
+ return;
+
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (cpufreq_fast_switch_count >= 0) {
+ cpufreq_fast_switch_count++;
+ policy->fast_switch_enabled = true;
+ } else {
+ pr_warn("CPU%u: Fast frequency switching not enabled\n",
+ policy->cpu);
+ cpufreq_list_transition_notifiers();
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (policy->fast_switch_enabled) {
+ policy->fast_switch_enabled = false;
+ if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+ cpufreq_fast_switch_count--;
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+
+static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int idx;
+
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ if (!policy->freq_table)
+ return target_freq;
+
+ idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ policy->cached_resolved_idx = idx;
+ policy->cached_target_freq = target_freq;
+ return policy->freq_table[idx].frequency;
+}
+
+/**
+ * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
+ * one.
+ * @policy: associated policy to interrogate
+ * @target_freq: target frequency to resolve.
+ *
+ * The target to driver frequency mapping is cached in the policy.
+ *
+ * Return: Lowest driver-supported frequency greater than or equal to the
+ * given target_freq, subject to policy (min/max) and driver limitations.
+ */
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
+{
+ unsigned int latency;
+
+ if (policy->transition_delay_us)
+ return policy->transition_delay_us;
+
+ latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (latency) {
+ /*
+ * For platforms that can change the frequency very fast (< 10
+ * us), multiplying the latency by LATENCY_MULTIPLIER below
+ * gives a decent transition delay. But for platforms where
+ * transition_latency is in milliseconds, it ends up giving
+ * unrealistic values.
+ *
+ * Cap the default transition delay to 10 ms, which seems to be
+ * a reasonable amount of time after which we should reevaluate
+ * the frequency.
+ */
+ return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
+ }
+
+ return LATENCY_MULTIPLIER;
+}
+EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
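+
+/*
+ * Worked example (informational): with LATENCY_MULTIPLIER defined as 1000 in
+ * include/linux/cpufreq.h, a driver reporting a transition_latency of 3000 ns
+ * gets latency = 3 us and a default transition delay of 3 * 1000 = 3000 us,
+ * while a transition_latency of 2 ms would give 2000 us * 1000, capped to the
+ * 10000 us (10 ms) limit above.
+ */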
+
+/*********************************************************************
+ * SYSFS INTERFACE *
+ *********************************************************************/
+static ssize_t show_boost(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = sscanf(buf, "%d", &enable);
+ if (ret != 1 || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+ pr_err("%s: Cannot %s BOOST!\n",
+ __func__, enable ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: cpufreq BOOST %s\n",
+ __func__, enable ? "enabled" : "disabled");
+
+ return count;
+}
+define_one_global_rw(boost);
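+
+/*
+ * Note (informational): the "boost" attribute defined above is a single
+ * global knob. When the driver supports boosting it shows up as
+ * /sys/devices/system/cpu/cpufreq/boost; reading it reports whether boost is
+ * currently enabled and writing 0 or 1 toggles it via
+ * cpufreq_boost_trigger_state().
+ */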
+
+static struct cpufreq_governor *find_governor(const char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ for_each_governor(t)
+ if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
+ return t;
+
+ return NULL;
+}
+
+static struct cpufreq_governor *get_governor(const char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ mutex_lock(&cpufreq_governor_mutex);
+ t = find_governor(str_governor);
+ if (!t)
+ goto unlock;
+
+ if (!try_module_get(t->owner))
+ t = NULL;
+
+unlock:
+ mutex_unlock(&cpufreq_governor_mutex);
+
+ return t;
+}
+
+static unsigned int cpufreq_parse_policy(char *str_governor)
+{
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_PERFORMANCE;
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ return CPUFREQ_POLICY_POWERSAVE;
+
+ return CPUFREQ_POLICY_UNKNOWN;
+}
+
+/**
+ * cpufreq_parse_governor - parse a governor string only for has_target()
+ * @str_governor: Governor name.
+ */
+static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ t = get_governor(str_governor);
+ if (t)
+ return t;
+
+ if (request_module("cpufreq_%s", str_governor))
+ return NULL;
+
+ return get_governor(str_governor);
+}
+
+/*
+ * cpufreq_per_cpu_attr_read() / show_##file_name() -
+ * print out cpufreq information
+ *
+ * Write out information from cpufreq_driver->policy[cpu]; object must be
+ * "unsigned int".
+ */
+
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", policy->object); \
+}
+
+show_one(cpuinfo_min_freq, cpuinfo.min_freq);
+show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+show_one(scaling_min_freq, min);
+show_one(scaling_max_freq, max);
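+
+/*
+ * For reference, show_one(scaling_min_freq, min) above expands to roughly:
+ *
+ * static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
+ * char *buf)
+ * {
+ * return sprintf(buf, "%u\n", policy->min);
+ * }
+ *
+ * The store_one() macro further down follows the same pattern for writes,
+ * routing the parsed value through the policy's freq_qos requests.
+ */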
+
+__weak unsigned int arch_freq_get_on_cpu(int cpu)
+{
+ return 0;
+}
+
+static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
+{
+ ssize_t ret;
+ unsigned int freq;
+
+ freq = arch_freq_get_on_cpu(policy->cpu);
+ if (freq)
+ ret = sprintf(buf, "%u\n", freq);
+ else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
+ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+ else
+ ret = sprintf(buf, "%u\n", policy->cur);
+ return ret;
+}
+
+/*
+ * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
+ */
+#define store_one(file_name, object) \
+static ssize_t store_##file_name \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = sscanf(buf, "%lu", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ \
+ ret = freq_qos_update_request(policy->object##_freq_req, val);\
+ return ret >= 0 ? count : ret; \
+}
+
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
+
+/*
+ * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ unsigned int cur_freq = __cpufreq_get(policy);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
+}
+
+/*
+ * show_scaling_governor - show the current policy for the specified CPU
+ */
+static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
+{
+ if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
+ return sprintf(buf, "powersave\n");
+ else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sprintf(buf, "performance\n");
+ else if (policy->governor)
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
+ policy->governor->name);
+ return -EINVAL;
+}
+
+/*
+ * store_scaling_governor - store policy for the specified CPU
+ */
+static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ char str_governor[16];
+ int ret;
+
+ ret = sscanf(buf, "%15s", str_governor);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (cpufreq_driver->setpolicy) {
+ unsigned int new_pol;
+
+ new_pol = cpufreq_parse_policy(str_governor);
+ if (!new_pol)
+ return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, NULL, new_pol);
+ } else {
+ struct cpufreq_governor *new_gov;
+
+ new_gov = cpufreq_parse_governor(str_governor);
+ if (!new_gov)
+ return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, new_gov,
+ CPUFREQ_POLICY_UNKNOWN);
+
+ module_put(new_gov->owner);
+ }
+
+ return ret ? ret : count;
+}
+
+/*
+ * show_scaling_driver - show the cpufreq driver currently loaded
+ */
+static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
+{
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
+}
+
+/*
+ * show_scaling_available_governors - show the available CPUfreq governors
+ */
+static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+ char *buf)
+{
+ ssize_t i = 0;
+ struct cpufreq_governor *t;
+
+ if (!has_target()) {
+ i += sprintf(buf, "performance powersave");
+ goto out;
+ }
+
+ mutex_lock(&cpufreq_governor_mutex);
+ for_each_governor(t) {
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
+ break;
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ }
+ mutex_unlock(&cpufreq_governor_mutex);
+out:
+ i += sprintf(&buf[i], "\n");
+ return i;
+}
+
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
+{
+ ssize_t i = 0;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
+ if (i >= (PAGE_SIZE - 5))
+ break;
+ }
+
+ /* Remove the extra space at the end */
+ i--;
+
+ i += sprintf(&buf[i], "\n");
+ return i;
+}
+EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
+
+/*
+ * show_related_cpus - show the CPUs affected by each transition even if
+ * hw coordination is in use
+ */
+static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ return cpufreq_show_cpus(policy->related_cpus, buf);
+}
+
+/*
+ * show_affected_cpus - show the CPUs affected by each transition
+ */
+static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ return cpufreq_show_cpus(policy->cpus, buf);
+}
+
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int freq = 0;
+ unsigned int ret;
+
+ if (!policy->governor || !policy->governor->store_setspeed)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%u", &freq);
+ if (ret != 1)
+ return -EINVAL;
+
+ policy->governor->store_setspeed(policy, freq);
+
+ return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+ if (!policy->governor || !policy->governor->show_setspeed)
+ return sprintf(buf, "<unsupported>\n");
+
+ return policy->governor->show_setspeed(policy, buf);
+}
+
+/*
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
+ */
+static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int limit;
+ int ret;
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
+ return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+}
+
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
+
+static struct attribute *cpufreq_attrs[] = {
+ &cpuinfo_min_freq.attr,
+ &cpuinfo_max_freq.attr,
+ &cpuinfo_transition_latency.attr,
+ &scaling_min_freq.attr,
+ &scaling_max_freq.attr,
+ &affected_cpus.attr,
+ &related_cpus.attr,
+ &scaling_governor.attr,
+ &scaling_driver.attr,
+ &scaling_available_governors.attr,
+ &scaling_setspeed.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(cpufreq);
+
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
+
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EBUSY;
+
+ if (!fattr->show)
+ return -EIO;
+
+ down_read(&policy->rwsem);
+ if (likely(!policy_is_inactive(policy)))
+ ret = fattr->show(policy, buf);
+ up_read(&policy->rwsem);
+
+ return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EBUSY;
+
+ if (!fattr->store)
+ return -EIO;
+
+ down_write(&policy->rwsem);
+ if (likely(!policy_is_inactive(policy)))
+ ret = fattr->store(policy, buf, count);
+ up_write(&policy->rwsem);
+
+ return ret;
+}
+
+static void cpufreq_sysfs_release(struct kobject *kobj)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ pr_debug("last reference is dropped\n");
+ complete(&policy->kobj_unregister);
+}
+
+static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct kobj_type ktype_cpufreq = {
+ .sysfs_ops = &sysfs_ops,
+ .default_groups = cpufreq_groups,
+ .release = cpufreq_sysfs_release,
+};
+
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *dev)
+{
+ if (unlikely(!dev))
+ return;
+
+ if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+ return;
+
+ dev_dbg(dev, "%s: Adding symlink\n", __func__);
+ if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
+ dev_err(dev, "cpufreq symlink creation failed\n");
+}
+
+static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
+ struct device *dev)
+{
+ dev_dbg(dev, "%s: Removing symlink\n", __func__);
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ cpumask_clear_cpu(cpu, policy->real_cpus);
+}
+
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
+{
+ struct freq_attr **drv_attr;
+ int ret = 0;
+
+ /* set up files for this cpu device */
+ drv_attr = cpufreq_driver->attr;
+ while (drv_attr && *drv_attr) {
+ ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ if (ret)
+ return ret;
+ drv_attr++;
+ }
+ if (cpufreq_driver->get) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+ if (ret)
+ return ret;
+ }
+
+ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+ struct cpufreq_governor *gov = NULL;
+ unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
+ int ret;
+
+ if (has_target()) {
+ /* Update policy governor to the one used before hotplug. */
+ gov = get_governor(policy->last_governor);
+ if (gov) {
+ pr_debug("Restoring governor %s for cpu %d\n",
+ gov->name, policy->cpu);
+ } else {
+ gov = get_governor(default_governor);
+ }
+
+ if (!gov) {
+ gov = cpufreq_default_governor();
+ __module_get(gov->owner);
+ }
+
+ } else {
+
+ /* Use the default policy if there is no last_policy. */
+ if (policy->last_policy) {
+ pol = policy->last_policy;
+ } else {
+ pol = cpufreq_parse_policy(default_governor);
+ /*
+ * In case the default governor is neither "performance"
+ * nor "powersave", fall back to the initial policy
+ * value set by the driver.
+ */
+ if (pol == CPUFREQ_POLICY_UNKNOWN)
+ pol = policy->policy;
+ }
+ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+ pol != CPUFREQ_POLICY_POWERSAVE)
+ return -ENODATA;
+ }
+
+ ret = cpufreq_set_policy(policy, gov, pol);
+ if (gov)
+ module_put(gov->owner);
+
+ return ret;
+}
+
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int ret = 0;
+
+ /* Has this CPU been taken care of already? */
+ if (cpumask_test_cpu(cpu, policy->cpus))
+ return 0;
+
+ down_write(&policy->rwsem);
+ if (has_target())
+ cpufreq_stop_governor(policy);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+
+ if (has_target()) {
+ ret = cpufreq_start_governor(policy);
+ if (ret)
+ pr_err("%s: Failed to start governor\n", __func__);
+ }
+ up_write(&policy->rwsem);
+ return ret;
+}
+
+void refresh_frequency_limits(struct cpufreq_policy *policy)
+{
+ if (!policy_is_inactive(policy)) {
+ pr_debug("updating policy for CPU %u\n", policy->cpu);
+
+ cpufreq_set_policy(policy, policy->governor, policy->policy);
+ }
+}
+EXPORT_SYMBOL(refresh_frequency_limits);
+
+static void handle_update(struct work_struct *work)
+{
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+
+ pr_debug("handle_update for cpu %u called\n", policy->cpu);
+ down_write(&policy->rwsem);
+ refresh_frequency_limits(policy);
+ up_write(&policy->rwsem);
+}
+
+static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
+ void *data)
+{
+ struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
+
+ schedule_work(&policy->update);
+ return 0;
+}
+
+static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
+ void *data)
+{
+ struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
+
+ schedule_work(&policy->update);
+ return 0;
+}
+
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ down_write(&policy->rwsem);
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_write(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct device *dev = get_cpu_device(cpu);
+ int ret;
+
+ if (!dev)
+ return NULL;
+
+ policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+ if (!policy)
+ return NULL;
+
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+ goto err_free_policy;
+
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+ goto err_free_cpumask;
+
+ if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ goto err_free_rcpumask;
+
+ init_completion(&policy->kobj_unregister);
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ cpufreq_global_kobject, "policy%u", cpu);
+ if (ret) {
+ dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
+ /*
+ * The entire policy object will be freed below, but the extra
+ * memory allocated for the kobject name needs to be freed by
+ * releasing the kobject.
+ */
+ kobject_put(&policy->kobj);
+ goto err_free_real_cpus;
+ }
+
+ freq_constraints_init(&policy->constraints);
+
+ policy->nb_min.notifier_call = cpufreq_notifier_min;
+ policy->nb_max.notifier_call = cpufreq_notifier_max;
+
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+ if (ret) {
+ dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
+ ret, cpumask_pr_args(policy->cpus));
+ goto err_kobj_remove;
+ }
+
+ ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
+ if (ret) {
+ dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
+ ret, cpumask_pr_args(policy->cpus));
+ goto err_min_qos_notifier;
+ }
+
+ INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
+ INIT_WORK(&policy->update, handle_update);
+
+ policy->cpu = cpu;
+ return policy;
+
+err_min_qos_notifier:
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+err_kobj_remove:
+ cpufreq_policy_put_kobj(policy);
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
+err_free_rcpumask:
+ free_cpumask_var(policy->related_cpus);
+err_free_cpumask:
+ free_cpumask_var(policy->cpus);
+err_free_policy:
+ kfree(policy);
+
+ return NULL;
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+ unsigned long flags;
+ int cpu;
+
+ /*
+ * The callers must ensure the policy is inactive by now, to avoid any
+ * races with show()/store() callbacks.
+ */
+ if (unlikely(!policy_is_inactive(policy)))
+ pr_warn("%s: Freeing active policy\n", __func__);
+
+ /* Remove policy from list */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
+
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+ &policy->nb_max);
+ freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+ &policy->nb_min);
+
+ /* Cancel any pending policy->update work before freeing the policy. */
+ cancel_work_sync(&policy->update);
+
+ if (policy->max_freq_req) {
+ /*
+ * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+ * notification, since CPUFREQ_CREATE_POLICY notification was
+ * sent after adding max_freq_req earlier.
+ */
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+ freq_qos_remove_request(policy->max_freq_req);
+ }
+
+ freq_qos_remove_request(policy->min_freq_req);
+ kfree(policy->min_freq_req);
+
+ cpufreq_policy_put_kobj(policy);
+ free_cpumask_var(policy->real_cpus);
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+}
+
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ unsigned long flags;
+ unsigned int j;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ down_write(&policy->rwsem);
+ policy->cpu = cpu;
+ policy->governor = NULL;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ down_write(&policy->rwsem);
+ }
+
+ if (!new_policy && cpufreq_driver->online) {
+ /* Recover policy->cpus using related_cpus */
+ cpumask_copy(policy->cpus, policy->related_cpus);
+
+ ret = cpufreq_driver->online(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_exit_policy;
+ }
+ } else {
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+ /*
+ * Call driver. From then on the cpufreq must be able
+ * to accept all calls to ->verify and ->setpolicy for this CPU.
+ */
+ ret = cpufreq_driver->init(policy);
+ if (ret) {
+ pr_debug("%s: %d: initialization failed\n", __func__,
+ __LINE__);
+ goto out_free_policy;
+ }
+
+ /*
+ * The initialization has succeeded and the policy is online.
+ * If there is a problem with its frequency table, take it
+ * offline and drop it.
+ */
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_offline_policy;
+
+ /* related_cpus should at least include policy->cpus. */
+ cpumask_copy(policy->related_cpus, policy->cpus);
+ }
+
+ /*
+ * The affected CPUs must always be the ones that are online; we are
+ * not managing offline CPUs here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (new_policy) {
+ for_each_cpu(j, policy->related_cpus) {
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ add_cpu_dev_symlink(policy, j, get_cpu_device(j));
+ }
+
+ policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
+ GFP_KERNEL);
+ if (!policy->min_freq_req) {
+ ret = -ENOMEM;
+ goto out_destroy_policy;
+ }
+
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->min_freq_req, FREQ_QOS_MIN,
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+ if (ret < 0) {
+ /*
+ * So we don't call freq_qos_remove_request() for an
+ * uninitialized request.
+ */
+ kfree(policy->min_freq_req);
+ policy->min_freq_req = NULL;
+ goto out_destroy_policy;
+ }
+
+ /*
+ * This must be initialized right here to avoid calling
+ * freq_qos_remove_request() on uninitialized request in case
+ * of errors.
+ */
+ policy->max_freq_req = policy->min_freq_req + 1;
+
+ ret = freq_qos_add_request(&policy->constraints,
+ policy->max_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ policy->max_freq_req = NULL;
+ goto out_destroy_policy;
+ }
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
+ }
+
+ if (cpufreq_driver->get && has_target()) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ ret = -EIO;
+ pr_err("%s: ->get() failed\n", __func__);
+ goto out_destroy_policy;
+ }
+ }
+
+ /*
+ * Sometimes boot loaders set the CPU frequency to a value outside of
+ * the frequency table known to the cpufreq core. In such cases the CPU
+ * might be unstable if it has to run at that frequency for a long time,
+ * so it is better to set it to a frequency that is specified in the
+ * frequency table. Running at an unlisted frequency also makes the
+ * cpufreq stats inconsistent, as cpufreq-stats would fail to register
+ * because the current frequency of the CPU is not found in the table.
+ *
+ * Because we don't want this change to affect the boot process badly,
+ * we go for the next frequency which is >= policy->cur ('cur' must be
+ * set by now, otherwise we would end up setting the frequency to the
+ * lowest entry of the table, as 'cur' is initialized to zero).
+ *
+ * We pass the target frequency as "policy->cur - 1", since otherwise
+ * __cpufreq_driver_target() would simply return early, as policy->cur
+ * would be equal to the target frequency.
+ */
+ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ && has_target()) {
+ unsigned int old_freq = policy->cur;
+
+ /* Are we running at an unknown frequency? */
+ ret = cpufreq_frequency_table_get_index(policy, old_freq);
+ if (ret == -EINVAL) {
+ ret = __cpufreq_driver_target(policy, old_freq - 1,
+ CPUFREQ_RELATION_L);
+
+ /*
+ * Having run at the unknown frequency for only a few
+ * seconds since boot does not mean the system would stay
+ * stable at it for much longer. Hence, a BUG_ON() if we
+ * failed to move away from it.
+ */
+ BUG_ON(ret);
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ __func__, policy->cpu, old_freq, policy->cur);
+ }
+ }
+
+ if (new_policy) {
+ ret = cpufreq_add_dev_interface(policy);
+ if (ret)
+ goto out_destroy_policy;
+
+ cpufreq_stats_create_table(policy);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ /*
+ * Register with the energy model before
+ * sched_cpufreq_governor_change() is called, which will result
+ * in rebuilding of the sched domains; that should only be done
+ * once the energy model has been properly initialized for the
+ * policy.
+ *
+ * Also, this should be called before the policy is registered
+ * with cooling framework.
+ */
+ if (cpufreq_driver->register_em)
+ cpufreq_driver->register_em(policy);
+ }
+
+ ret = cpufreq_init_policy(policy);
+ if (ret) {
+ pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+ __func__, cpu, ret);
+ goto out_destroy_policy;
+ }
+
+ up_write(&policy->rwsem);
+
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
+
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
+ if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ policy->cdev = of_cpufreq_cooling_register(policy);
+
+ pr_debug("initialization complete\n");
+
+ return 0;
+
+out_destroy_policy:
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
+
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
+out_exit_policy:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+out_free_policy:
+ cpumask_clear(policy->cpus);
+ up_write(&policy->rwsem);
+
+ cpufreq_policy_free(policy);
+ return ret;
+}
+
+/**
+ * cpufreq_add_dev - the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ struct cpufreq_policy *policy;
+ unsigned cpu = dev->id;
+ int ret;
+
+ dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+ if (cpu_online(cpu)) {
+ ret = cpufreq_online(cpu);
+ if (ret)
+ return ret;
+ }
+
+ /* Create sysfs link on CPU registration */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy)
+ add_cpu_dev_symlink(policy, cpu, dev);
+
+ return 0;
+}
+
+static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (has_target())
+ cpufreq_stop_governor(policy);
+
+ cpumask_clear_cpu(cpu, policy->cpus);
+
+ if (!policy_is_inactive(policy)) {
+ /* Nominate a new CPU if necessary. */
+ if (cpu == policy->cpu)
+ policy->cpu = cpumask_any(policy->cpus);
+
+ /* Start the governor again for the active policy. */
+ if (has_target()) {
+ ret = cpufreq_start_governor(policy);
+ if (ret)
+ pr_err("%s: Failed to start governor\n", __func__);
+ }
+
+ return;
+ }
+
+ if (has_target())
+ strncpy(policy->last_governor, policy->governor->name,
+ CPUFREQ_NAME_LEN);
+ else
+ policy->last_policy = policy->policy;
+
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
+ if (has_target())
+ cpufreq_exit_governor(policy);
+
+ /*
+ * Perform the ->offline() during light-weight tear-down, as
+ * that allows fast recovery when the CPU comes back.
+ */
+ if (cpufreq_driver->offline) {
+ cpufreq_driver->offline(policy);
+ } else if (cpufreq_driver->exit) {
+ cpufreq_driver->exit(policy);
+ policy->freq_table = NULL;
+ }
+}
+
+static int cpufreq_offline(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return 0;
+ }
+
+ down_write(&policy->rwsem);
+
+ __cpufreq_offline(cpu, policy);
+
+ up_write(&policy->rwsem);
+ return 0;
+}
+
+/*
+ * cpufreq_remove_dev - remove a CPU device
+ *
+ * Removes the cpufreq interface for a CPU device.
+ */
+static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id;
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ if (!policy)
+ return;
+
+ down_write(&policy->rwsem);
+
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
+
+ remove_cpu_dev_symlink(policy, cpu, dev);
+
+ if (!cpumask_empty(policy->real_cpus)) {
+ up_write(&policy->rwsem);
+ return;
+ }
+
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline)
+ cpufreq_driver->exit(policy);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_policy_free(policy);
+}
+
+/**
+ * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
+ * @policy: Policy managing CPUs.
+ * @new_freq: New CPU frequency.
+ *
+ * Adjust to the current frequency first and clean up later by either calling
+ * cpufreq_update_policy(), or scheduling handle_update().
+ */
+static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
+ unsigned int new_freq)
+{
+ struct cpufreq_freqs freqs;
+
+ pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+ policy->cur, new_freq);
+
+ freqs.old = policy->cur;
+ freqs.new = new_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+}
+
+static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
+{
+ unsigned int new_freq;
+
+ new_freq = cpufreq_driver->get(policy->cpu);
+ if (!new_freq)
+ return 0;
+
+ /*
+ * If fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case.
+ */
+ if (policy->fast_switch_enabled || !has_target())
+ return new_freq;
+
+ if (policy->cur != new_freq) {
+ /*
+ * For some platforms, the frequency returned by hardware may be
+ * slightly different from what is provided in the frequency
+ * table, for example hardware may return 499 MHz instead of 500
+ * MHz. In such cases it is better to avoid getting into
+ * unnecessary frequency updates.
+ */
+ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
+ return policy->cur;
+
+ cpufreq_out_of_sync(policy, new_freq);
+ if (update)
+ schedule_work(&policy->update);
+ }
+
+ return new_freq;
+}
+
+/**
+ * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
+ * @cpu: CPU number
+ *
+ * This is the last known frequency, without actually getting it from the
+ * driver. The return value is the same as what is shown in scaling_cur_freq
+ * in sysfs.
+ */
+unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ unsigned int ret_freq = 0;
+ unsigned long flags;
+
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
+ ret_freq = cpufreq_driver->get(cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return ret_freq;
+ }
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ ret_freq = policy->cur;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get);
+
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
+/**
+ * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
+ * @cpu: CPU number
+ *
+ * The default return value is the max_freq field of cpuinfo.
+ */
+__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
+
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
+{
+ if (unlikely(policy_is_inactive(policy)))
+ return 0;
+
+ return cpufreq_verify_current_freq(policy, true);
+}
+
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the current frequency of the CPU as reported by the cpufreq driver.
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ down_read(&policy->rwsem);
+ if (cpufreq_driver->get)
+ ret_freq = __cpufreq_get(policy);
+ up_read(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_get);
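+
+/*
+ * Note for callers (informational): cpufreq_quick_get() above returns the
+ * cached policy->cur without querying the hardware, whereas cpufreq_get()
+ * takes the policy rwsem and asks the driver, resynchronizing policy->cur if
+ * it has drifted noticeably.
+ */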
+
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
+};
+
+/*
+ * In case the platform wants a specific frequency to be configured
+ * during suspend.
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (!policy->suspend_freq) {
+ pr_debug("%s: suspend_freq not defined\n", __func__);
+ return 0;
+ }
+
+ pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+ policy->suspend_freq);
+
+ ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+ CPUFREQ_RELATION_H);
+ if (ret)
+ pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+ __func__, policy->suspend_freq, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
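+
+/*
+ * Hypothetical usage sketch (not part of this file): a platform driver that
+ * wants a fixed frequency across suspend can record it in policy->suspend_freq
+ * from its ->init() callback and plug in the generic helper. All example_*
+ * names below are made up.
+ */
+#if 0
+static int example_cpufreq_init(struct cpufreq_policy *policy)
+{
+ /* ... set up policy->freq_table, policy->cpus, etc. ... */
+ policy->suspend_freq = 800000; /* kHz */
+ return 0;
+}
+
+static struct cpufreq_driver example_driver = {
+ .name = "example",
+ .init = example_cpufreq_init,
+ .suspend = cpufreq_generic_suspend,
+ /* ... .verify, .target_index, ... */
+};
+#endif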
+
+/**
+ * cpufreq_suspend() - Suspend CPUFreq governors.
+ *
+ * Called during system-wide suspend/hibernate cycles to suspend governors,
+ * as some platforms can't change the frequency after this point in the
+ * suspend cycle, because some of the devices (like i2c, regulators, etc.)
+ * they use for changing the frequency are suspended shortly afterwards.
+ */
+void cpufreq_suspend(void)
+{
+ struct cpufreq_policy *policy;
+
+ if (!cpufreq_driver)
+ return;
+
+ if (!has_target() && !cpufreq_driver->suspend)
+ goto suspend;
+
+ pr_debug("%s: Suspending Governors\n", __func__);
+
+ for_each_active_policy(policy) {
+ if (has_target()) {
+ down_write(&policy->rwsem);
+ cpufreq_stop_governor(policy);
+ up_write(&policy->rwsem);
+ }
+
+ if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
+ pr_err("%s: Failed to suspend driver: %s\n", __func__,
+ cpufreq_driver->name);
+ }
+
+suspend:
+ cpufreq_suspended = true;
+}
+
+/**
+ * cpufreq_resume() - Resume CPUFreq governors.
+ *
+ * Called during system-wide suspend/hibernate cycles to resume the governors
+ * that were suspended by cpufreq_suspend().
+ */
+void cpufreq_resume(void)
+{
+ struct cpufreq_policy *policy;
+ int ret;
+
+ if (!cpufreq_driver)
+ return;
+
+ if (unlikely(!cpufreq_suspended))
+ return;
+
+ cpufreq_suspended = false;
+
+ if (!has_target() && !cpufreq_driver->resume)
+ return;
+
+ pr_debug("%s: Resuming Governors\n", __func__);
+
+ for_each_active_policy(policy) {
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
+ pr_err("%s: Failed to resume driver: %p\n", __func__,
+ policy);
+ } else if (has_target()) {
+ down_write(&policy->rwsem);
+ ret = cpufreq_start_governor(policy);
+ up_write(&policy->rwsem);
+
+ if (ret)
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
+ }
+ }
+}
+
+/**
+ * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
+ * @flags: Flags to test against the current cpufreq driver's flags.
+ *
+ * Assumes that the driver is there, so callers must ensure that this is the
+ * case.
+ */
+bool cpufreq_driver_test_flags(u16 flags)
+{
+ return !!(cpufreq_driver->flags & flags);
+}
+
+/**
+ * cpufreq_get_current_driver - Return the current driver's name.
+ *
+ * Return the name string of the currently registered cpufreq driver or NULL if
+ * none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
+
+/**
+ * cpufreq_get_driver_data - Return current driver data.
+ *
+ * Return the private data of the currently registered cpufreq driver, or NULL
+ * if no cpufreq driver has been registered.
+ */
+void *cpufreq_get_driver_data(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->driver_data;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
+
+/*********************************************************************
+ * NOTIFIER LISTS INTERFACE *
+ *********************************************************************/
+
+/**
+ * cpufreq_register_notifier - Register a notifier with cpufreq.
+ * @nb: notifier function to register.
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
+ *
+ * Add a notifier to one of two lists: either a list of notifiers that run on
+ * clock rate changes (once before and once after every transition), or a list
+ * of notifiers that run on cpufreq policy changes.
+ *
+ * This function may sleep and it has the same return values as
+ * blocking_notifier_chain_register().
+ */
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
+{
+ int ret;
+
+ if (cpufreq_disabled())
+ return -EINVAL;
+
+ switch (list) {
+ case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
+ if (cpufreq_fast_switch_count > 0) {
+ mutex_unlock(&cpufreq_fast_switch_lock);
+ return -EBUSY;
+ }
+ ret = srcu_notifier_chain_register(
+ &cpufreq_transition_notifier_list, nb);
+ if (!ret)
+ cpufreq_fast_switch_count--;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
+ break;
+ case CPUFREQ_POLICY_NOTIFIER:
+ ret = blocking_notifier_chain_register(
+ &cpufreq_policy_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_register_notifier);
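+
+/*
+ * Usage sketch (not part of this file): a client interested in frequency
+ * transitions could register a notifier as below. The example_* names are
+ * made up; for transition events the notifier data is a struct cpufreq_freqs
+ * pointer. Note that registration fails with -EBUSY while fast switching is
+ * in use, as implemented above.
+ */
+#if 0
+static int example_transition(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freqs = data;
+
+ if (val == CPUFREQ_POSTCHANGE)
+ pr_info("CPU%u: %u -> %u kHz\n", freqs->policy->cpu,
+ freqs->old, freqs->new);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block example_nb = {
+ .notifier_call = example_transition,
+};
+
+static int __init example_client_init(void)
+{
+ return cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
+}
+#endif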
+
+/**
+ * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
+ * @nb: notifier block to be unregistered.
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
+ *
+ * Remove a notifier from one of the cpufreq notifier lists.
+ *
+ * This function may sleep and it has the same return values as
+ * blocking_notifier_chain_unregister().
+ */
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
+{
+ int ret;
+
+ if (cpufreq_disabled())
+ return -EINVAL;
+
+ switch (list) {
+ case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
+ ret = srcu_notifier_chain_unregister(
+ &cpufreq_transition_notifier_list, nb);
+ if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+ cpufreq_fast_switch_count++;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
+ break;
+ case CPUFREQ_POLICY_NOTIFIER:
+ ret = blocking_notifier_chain_unregister(
+ &cpufreq_policy_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_unregister_notifier);
+
+
+/*********************************************************************
+ * GOVERNORS *
+ *********************************************************************/
+
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * Returns the actual frequency set for the CPU.
+ *
+ * If 0 is returned by the driver's ->fast_switch() callback to indicate an
+ * error condition, the hardware configuration must be preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ unsigned int freq;
+ int cpu;
+
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+ freq = cpufreq_driver->fast_switch(policy, target_freq);
+
+ if (!freq)
+ return 0;
+
+ policy->cur = freq;
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+ cpufreq_stats_record_transition(policy, freq);
+
+ if (trace_cpu_frequency_enabled()) {
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freq, cpu);
+ }
+
+ return freq;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
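+
+/*
+ * Governor-side sketch (not part of this file): the fast path is only legal
+ * after cpufreq_enable_fast_switch() has set policy->fast_switch_enabled, and
+ * the fallback must run in a context that is allowed to sleep. The function
+ * name below is made up.
+ */
+#if 0
+static void example_update_freq(struct cpufreq_policy *policy, unsigned int freq)
+{
+ if (policy->fast_switch_enabled)
+ cpufreq_driver_fast_switch(policy, freq); /* must not sleep */
+ else
+ cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); /* may sleep */
+}
+#endif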
+
+/**
+ * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
+ * @cpu: Target CPU.
+ * @min_perf: Minimum (required) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
+ * @capacity: Capacity of the target CPU.
+ *
+ * Carry out a fast performance level switch of @cpu without sleeping.
+ *
+ * The driver's ->adjust_perf() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select a suitable performance level equal to or above
+ * @min_perf and preferably equal to or below @target_perf.
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same CPU and that it will never be called in
+ * parallel with either ->target() or ->target_index() or ->fast_switch() for
+ * the same CPU.
+ */
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
+}
+
+/**
+ * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
+ *
+ * Return 'true' if the ->adjust_perf callback is present for the
+ * current driver or 'false' otherwise.
+ */
+bool cpufreq_driver_has_adjust_perf(void)
+{
+ return !!cpufreq_driver->adjust_perf;
+}
+
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int index)
+{
+ int ret;
+
+ freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+ /* We don't need to switch to intermediate freq */
+ if (!freqs->new)
+ return 0;
+
+ pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+ __func__, policy->cpu, freqs->old, freqs->new);
+
+ cpufreq_freq_transition_begin(policy, freqs);
+ ret = cpufreq_driver->target_intermediate(policy, index);
+ cpufreq_freq_transition_end(policy, freqs, ret);
+
+ if (ret)
+ pr_err("%s: Failed to change to intermediate frequency: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __target_index(struct cpufreq_policy *policy, int index)
+{
+ struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+ unsigned int restore_freq, intermediate_freq = 0;
+ unsigned int newfreq = policy->freq_table[index].frequency;
+ int retval = -EINVAL;
+ bool notify;
+
+ if (newfreq == policy->cur)
+ return 0;
+
+ /* Save last value to restore later on errors */
+ restore_freq = policy->cur;
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+ if (notify) {
+ /* Handle switching to intermediate frequency */
+ if (cpufreq_driver->get_intermediate) {
+ retval = __target_intermediate(policy, &freqs, index);
+ if (retval)
+ return retval;
+
+ intermediate_freq = freqs.new;
+ /* Set old freq to intermediate */
+ if (intermediate_freq)
+ freqs.old = freqs.new;
+ }
+
+ freqs.new = newfreq;
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old, freqs.new);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
+ retval);
+
+ if (notify) {
+ cpufreq_freq_transition_end(policy, &freqs, retval);
+
+ /*
+ * Failed after setting to intermediate freq? Driver should have
+ * reverted back to initial frequency and so should we. Check
+ * here for intermediate_freq instead of get_intermediate, in
+ * case we haven't switched to intermediate freq at all.
+ */
+ if (unlikely(retval && intermediate_freq)) {
+ freqs.old = intermediate_freq;
+ freqs.new = restore_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ }
+ }
+
+ return retval;
+}
+
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int old_target_freq = target_freq;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ target_freq = __resolve_freq(policy, target_freq, relation);
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+
+ /*
+ * This might look like a redundant check, as the frequency is
+ * compared again after the index has been found. It is left here
+ * intentionally so that, when exactly the same frequency is requested
+ * again, we can return early and save a few function calls.
+ */
+ if (target_freq == policy->cur &&
+ !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
+ return 0;
+
+ if (cpufreq_driver->target) {
+ /*
+ * If the driver hasn't setup a single inefficient frequency,
+ * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
+ */
+ if (!policy->efficiencies_available)
+ relation &= ~CPUFREQ_RELATION_E;
+
+ return cpufreq_driver->target(policy, target_freq, relation);
+ }
+
+ if (!cpufreq_driver->target_index)
+ return -EINVAL;
+
+ return __target_index(policy, policy->cached_resolved_idx);
+}
+EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
+
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+
+ down_write(&policy->rwsem);
+
+ ret = __cpufreq_driver_target(policy, target_freq, relation);
+
+ up_write(&policy->rwsem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_target);
+
+__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+ return NULL;
+}
+
+static int cpufreq_init_governor(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ /* Don't start any governor operations if we are entering suspend */
+ if (cpufreq_suspended)
+ return 0;
+ /*
+ * The governor might not be initialized here if an ACPI _PPC change
+ * notification occurred, so check for it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
+
+ /* Does the platform disallow dynamic frequency switching? */
+ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
+ cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
+ struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+ if (gov) {
+ pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
+ policy->governor->name, gov->name);
+ policy->governor = gov;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->init) {
+ ret = policy->governor->init(policy);
+ if (ret) {
+ module_put(policy->governor->owner);
+ return ret;
+ }
+ }
+
+ policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
+ return 0;
+}
+
+static void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->exit)
+ policy->governor->exit(policy);
+
+ module_put(policy->governor->owner);
+}
+
+int cpufreq_start_governor(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (cpufreq_suspended)
+ return 0;
+
+ if (!policy->governor)
+ return -EINVAL;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (cpufreq_driver->get)
+ cpufreq_verify_current_freq(policy, false);
+
+ if (policy->governor->start) {
+ ret = policy->governor->start(policy);
+ if (ret)
+ return ret;
+ }
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
+
+ return 0;
+}
+
+void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->stop)
+ policy->governor->stop(policy);
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
+}
+
+int cpufreq_register_governor(struct cpufreq_governor *governor)
+{
+ int err;
+
+ if (!governor)
+ return -EINVAL;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ mutex_lock(&cpufreq_governor_mutex);
+
+ err = -EBUSY;
+ if (!find_governor(governor->name)) {
+ err = 0;
+ list_add(&governor->governor_list, &cpufreq_governor_list);
+ }
+
+ mutex_unlock(&cpufreq_governor_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cpufreq_register_governor);
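+
+/*
+ * Minimal governor sketch (not part of this file): a governor registers a
+ * struct cpufreq_governor, typically from its module init. All example_*
+ * names are made up and most callbacks are omitted.
+ */
+#if 0
+static void example_gov_limits(struct cpufreq_policy *policy)
+{
+ /* React to min/max changes, e.g. by re-evaluating the target frequency. */
+}
+
+static struct cpufreq_governor example_gov = {
+ .name = "example",
+ .owner = THIS_MODULE,
+ .limits = example_gov_limits,
+};
+
+static int __init example_gov_init(void)
+{
+ return cpufreq_register_governor(&example_gov);
+}
+#endif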
+
+void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+{
+ struct cpufreq_policy *policy;
+ unsigned long flags;
+
+ if (!governor)
+ return;
+
+ if (cpufreq_disabled())
+ return;
+
+ /* clear last_governor for all inactive policies */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_inactive_policy(policy) {
+ if (!strcmp(policy->last_governor, governor->name)) {
+ policy->governor = NULL;
+ strcpy(policy->last_governor, "\0");
+ }
+ }
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ mutex_lock(&cpufreq_governor_mutex);
+ list_del(&governor->governor_list);
+ mutex_unlock(&cpufreq_governor_mutex);
+}
+EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
+
+
+/*********************************************************************
+ * POLICY INTERFACE *
+ *********************************************************************/
+
+/**
+ * cpufreq_get_policy - get the current cpufreq_policy
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ * is written
+ * @cpu: CPU to find the policy for
+ *
+ * Reads the current cpufreq policy.
+ */
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ struct cpufreq_policy *cpu_policy;
+ if (!policy)
+ return -EINVAL;
+
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ return -EINVAL;
+
+ memcpy(policy, cpu_policy, sizeof(*policy));
+
+ cpufreq_cpu_put(cpu_policy);
+ return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_policy);
+
+/**
+ * cpufreq_set_policy - Modify cpufreq policy parameters.
+ * @policy: Policy object to modify.
+ * @new_gov: Policy governor pointer.
+ * @new_pol: Policy value (for drivers with built-in governors).
+ *
+ * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
+ * limits to be set for the policy, update @policy with the verified limits
+ * values and either invoke the driver's ->setpolicy() callback (if present) or
+ * carry out a governor update for @policy. That is, run the current governor's
+ * ->limits() callback (if @new_gov points to the same object as the one in
+ * @policy) or replace the governor for @policy with @new_gov.
+ *
+ * The cpuinfo part of @policy is not updated by this function.
+ */
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_governor *new_gov,
+ unsigned int new_pol)
+{
+ struct cpufreq_policy_data new_data;
+ struct cpufreq_governor *old_gov;
+ int ret;
+
+ memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+ new_data.freq_table = policy->freq_table;
+ new_data.cpu = policy->cpu;
+ /*
+ * The PM QoS framework collects all the requests from users and
+ * provides us with the final aggregated values here.
+ */
+ new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+ new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
+
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_data.cpu, new_data.min, new_data.max);
+
+ /*
+ * Verify that the CPU speed can be set within these limits and make sure
+ * that min <= max.
+ */
+ ret = cpufreq_driver->verify(&new_data);
+ if (ret)
+ return ret;
+
+ /*
+ * Resolve the policy min/max to available frequencies, ensuring that
+ * the resolved frequencies neither overshoot the requested maximum
+ * nor undershoot the requested minimum.
+ */
+ policy->min = new_data.min;
+ policy->max = new_data.max;
+ policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+ policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ trace_cpu_frequency_limits(policy);
+
+ policy->cached_target_freq = UINT_MAX;
+
+ pr_debug("new min and max freqs are %u - %u kHz\n",
+ policy->min, policy->max);
+
+ if (cpufreq_driver->setpolicy) {
+ policy->policy = new_pol;
+ pr_debug("setting range\n");
+ return cpufreq_driver->setpolicy(policy);
+ }
+
+ if (new_gov == policy->governor) {
+ pr_debug("governor limits update\n");
+ cpufreq_governor_limits(policy);
+ return 0;
+ }
+
+ pr_debug("governor switch\n");
+
+ /* save old, working values */
+ old_gov = policy->governor;
+ /* end old governor */
+ if (old_gov) {
+ cpufreq_stop_governor(policy);
+ cpufreq_exit_governor(policy);
+ }
+
+ /* start new governor */
+ policy->governor = new_gov;
+ ret = cpufreq_init_governor(policy);
+ if (!ret) {
+ ret = cpufreq_start_governor(policy);
+ if (!ret) {
+ pr_debug("governor change\n");
+ sched_cpufreq_governor_change(policy, old_gov);
+ return 0;
+ }
+ cpufreq_exit_governor(policy);
+ }
+
+ /* new governor failed, so re-start old one */
+ pr_debug("starting governor %s failed\n", policy->governor->name);
+ if (old_gov) {
+ policy->governor = old_gov;
+ if (cpufreq_init_governor(policy))
+ policy->governor = NULL;
+ else
+ cpufreq_start_governor(policy);
+ }
+
+ return ret;
+}
+
+/**
+ * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
+ * @cpu: CPU to re-evaluate the policy for.
+ *
+ * Update the current frequency for the cpufreq policy of @cpu and use
+ * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
+ * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
+ * for the policy in question, among other things.
+ */
+void cpufreq_update_policy(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+
+ if (!policy)
+ return;
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ goto unlock;
+
+ refresh_frequency_limits(policy);
+
+unlock:
+ cpufreq_cpu_release(policy);
+}
+EXPORT_SYMBOL(cpufreq_update_policy);
+
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+ if (cpufreq_driver->update_limits)
+ cpufreq_driver->update_limits(cpu);
+ else
+ cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
+/*********************************************************************
+ * BOOST *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+{
+ int ret;
+
+ if (!policy->freq_table)
+ return -ENXIO;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n", __func__);
+ return ret;
+ }
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+ struct cpufreq_policy *policy;
+ unsigned long flags;
+ int ret = 0;
+
+ if (cpufreq_driver->boost_enabled == state)
+ return 0;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpus_read_lock();
+ for_each_active_policy(policy) {
+ ret = cpufreq_driver->set_boost(policy, state);
+ if (ret)
+ goto err_reset_state;
+ }
+ cpus_read_unlock();
+
+ return 0;
+
+err_reset_state:
+ cpus_read_unlock();
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
+
+ return ret;
+}
+
+static bool cpufreq_boost_supported(void)
+{
+ return cpufreq_driver->set_boost;
+}
+
+static int create_boost_sysfs_file(void)
+{
+ int ret;
+
+ ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
+ if (ret)
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+
+ return ret;
+}
+
+static void remove_boost_sysfs_file(void)
+{
+ if (cpufreq_boost_supported())
+ sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
+}
+
+int cpufreq_enable_boost_support(void)
+{
+ if (!cpufreq_driver)
+ return -EINVAL;
+
+ if (cpufreq_boost_supported())
+ return 0;
+
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+ /* This will get removed on driver unregister */
+ return create_boost_sysfs_file();
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
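+
+/*
+ * Usage sketch (hypothetical driver code, shown for illustration only):
+ * a driver whose frequency table carries boost entries typically calls
+ * cpufreq_enable_boost_support() from its ->init() callback so that the
+ * generic software boost handling and the global "boost" knob are set up:
+ *
+ *	static int example_cpu_init(struct cpufreq_policy *policy)
+ *	{
+ *		if (policy_has_boost_freq(policy))
+ *			cpufreq_enable_boost_support();
+ *		return 0;
+ *	}
+ */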
+
+int cpufreq_boost_enabled(void)
+{
+ return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
+
+/*********************************************************************
+ * REGISTER / UNREGISTER CPUFREQ DRIVER *
+ *********************************************************************/
+static enum cpuhp_state hp_online;
+
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+ cpufreq_online(cpu);
+
+ return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+ cpufreq_offline(cpu);
+
+ return 0;
+}
+
+/**
+ * cpufreq_register_driver - register a CPU Frequency driver
+ * @driver_data: A struct cpufreq_driver containing the values
+ * submitted by the CPU Frequency driver.
+ *
+ * Registers a CPU Frequency driver to this core code. This code
+ * returns zero on success, -EEXIST when another driver got here first
+ * (and isn't unregistered in the meantime).
+ *
+ */
+int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+{
+ unsigned long flags;
+ int ret;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ /*
+ * The cpufreq core depends heavily on the availability of device
+ * structures, so make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
+ if (!driver_data || !driver_data->verify || !driver_data->init ||
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target) ||
+ (driver_data->setpolicy && (driver_data->target_index ||
+ driver_data->target)) ||
+ (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
+ (!driver_data->online != !driver_data->offline))
+ return -EINVAL;
+
+ pr_debug("trying to register driver %s\n", driver_data->name);
+
+ /* Protect against concurrent CPU online/offline. */
+ cpus_read_lock();
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ if (cpufreq_driver) {
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ ret = -EEXIST;
+ goto out;
+ }
+ cpufreq_driver = driver_data;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("supports frequency invariance\n");
+ }
+
+ if (driver_data->setpolicy)
+ driver_data->flags |= CPUFREQ_CONST_LOOPS;
+
+ if (cpufreq_boost_supported()) {
+ ret = create_boost_sysfs_file();
+ if (ret)
+ goto err_null_driver;
+ }
+
+ ret = subsys_interface_register(&cpufreq_interface);
+ if (ret)
+ goto err_boost_unreg;
+
+ if (unlikely(list_empty(&cpufreq_policy_list))) {
+ /* if all ->init() calls failed, unregister */
+ ret = -ENODEV;
+ pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+ driver_data->name);
+ goto err_if_unreg;
+ }
+
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+ "cpufreq:online",
+ cpuhp_cpufreq_online,
+ cpuhp_cpufreq_offline);
+ if (ret < 0)
+ goto err_if_unreg;
+ hp_online = ret;
+ ret = 0;
+
+ pr_debug("driver %s up and running\n", driver_data->name);
+ goto out;
+
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+ remove_boost_sysfs_file();
+err_null_driver:
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+out:
+ cpus_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_register_driver);
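+
+/*
+ * Minimal registration sketch (hypothetical driver, for illustration only):
+ * fill in ->verify, ->init and one of ->setpolicy / ->target /
+ * ->target_index, register from module init and unregister from module exit:
+ *
+ *	static struct cpufreq_driver example_driver = {
+ *		.name		= "example",
+ *		.verify		= cpufreq_generic_frequency_table_verify,
+ *		.target_index	= example_target_index,
+ *		.init		= example_cpu_init,
+ *	};
+ *
+ *	static int __init example_module_init(void)
+ *	{
+ *		return cpufreq_register_driver(&example_driver);
+ *	}
+ *
+ *	static void __exit example_module_exit(void)
+ *	{
+ *		cpufreq_unregister_driver(&example_driver);
+ *	}
+ */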
+
+/*
+ * cpufreq_unregister_driver - unregister the current CPUFreq driver
+ *
+ * Unregister the current CPUFreq driver. Only call this if you have
+ * the right to do so, i.e. if you have succeeded in initialising before!
+ * Returns zero if successful, and -EINVAL if the cpufreq_driver is
+ * currently not initialised.
+ */
+int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+{
+ unsigned long flags;
+
+ if (!cpufreq_driver || (driver != cpufreq_driver))
+ return -EINVAL;
+
+ pr_debug("unregistering driver %s\n", driver->name);
+
+ /* Protect against concurrent cpu hotplug */
+ cpus_read_lock();
+ subsys_interface_unregister(&cpufreq_interface);
+ remove_boost_sysfs_file();
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
+ cpuhp_remove_state_nocalls_cpuslocked(hp_online);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpufreq_driver = NULL;
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ cpus_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+static int __init cpufreq_core_init(void)
+{
+ struct cpufreq_governor *gov = cpufreq_default_governor();
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+ BUG_ON(!cpufreq_global_kobject);
+
+ if (!strlen(default_governor))
+ strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
+
+ return 0;
+}
+module_param(off, int, 0444);
+module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
+core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
new file mode 100644
index 000000000..b6bd0ff35
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/cpufreq/cpufreq_conservative.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ */
+
+#include <linux/slab.h>
+#include "cpufreq_governor.h"
+
+struct cs_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+};
+
+static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
+}
+
+struct cs_dbs_tuners {
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
+/* Conservative governor macros */
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define DEF_FREQUENCY_STEP (5)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
+
+static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
+ struct cpufreq_policy *policy)
+{
+ unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows... */
+ if (unlikely(freq_step == 0))
+ freq_step = DEF_FREQUENCY_STEP;
+
+ return freq_step;
+}
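+
+/*
+ * Example of the arithmetic above: with the default freq_step of 5 and
+ * policy->max of 2000000 kHz, each step moves the requested frequency by
+ * 5% of the maximum, i.e. 100000 kHz.
+ */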
+
+/*
+ * Every sampling_rate, we check if the current idle time is less than 20%
+ * (default); if it is, we try to increase the frequency. Every sampling_rate *
+ * sampling_down_factor, we check if the current idle time is more than 80%
+ * (default); if it is, we try to decrease the frequency.
+ *
+ * Frequency updates happen in minimum steps of 5% (default) of the maximum
+ * frequency.
+ */
+static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
+ unsigned int freq_step;
+
+ /*
+ * break out if we 'cannot' reduce the speed as the user might
+ * want freq_step to be zero
+ */
+ if (cs_tuners->freq_step == 0)
+ goto out;
+
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min) {
+ requested_freq = policy->cur;
+ dbs_info->requested_freq = requested_freq;
+ }
+
+ freq_step = get_freq_step(cs_tuners, policy);
+
+ /*
+ * Decrease requested_freq one freq_step for each idle period that
+ * we didn't update the frequency.
+ */
+ if (policy_dbs->idle_periods < UINT_MAX) {
+ unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
+
+ if (requested_freq > policy->min + freq_steps)
+ requested_freq -= freq_steps;
+ else
+ requested_freq = policy->min;
+
+ policy_dbs->idle_periods = UINT_MAX;
+ }
+
+ /* Check for frequency increase */
+ if (load > dbs_data->up_threshold) {
+ dbs_info->down_skip = 0;
+
+ /* if we are already at full speed then break out early */
+ if (requested_freq == policy->max)
+ goto out;
+
+ requested_freq += freq_step;
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
+
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_HE);
+ dbs_info->requested_freq = requested_freq;
+ goto out;
+ }
+
+ /* if sampling_down_factor is active break out early */
+ if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
+ goto out;
+ dbs_info->down_skip = 0;
+
+ /* Check for frequency decrease */
+ if (load < cs_tuners->down_threshold) {
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+ if (requested_freq == policy->min)
+ goto out;
+
+ if (requested_freq > freq_step)
+ requested_freq -= freq_step;
+ else
+ requested_freq = policy->min;
+
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_LE);
+ dbs_info->requested_freq = requested_freq;
+ }
+
+ out:
+ return dbs_data->sampling_rate;
+}
+
+/************************** sysfs interface ************************/
+
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
+ dbs_data->sampling_down_factor = input;
+ return count;
+}
+
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ return -EINVAL;
+
+ dbs_data->up_threshold = input;
+ return count;
+}
+
+static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ /* cannot be lower than 1 otherwise freq will not fall */
+ if (ret != 1 || input < 1 || input > 100 ||
+ input >= dbs_data->up_threshold)
+ return -EINVAL;
+
+ cs_tuners->down_threshold = input;
+ return count;
+}
+
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ if (input == dbs_data->ignore_nice_load) /* nothing to do */
+ return count;
+
+ dbs_data->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ gov_update_cpu_data(dbs_data);
+
+ return count;
+}
+
+static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 100)
+ input = 100;
+
+ /*
+ * no need to test here if freq_step is zero, as the user might actually
+ * want this; they would be crazy though :)
+ */
+ cs_tuners->freq_step = input;
+ return count;
+}
+
+gov_show_one_common(sampling_rate);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(up_threshold);
+gov_show_one_common(ignore_nice_load);
+gov_show_one(cs, down_threshold);
+gov_show_one(cs, freq_step);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(up_threshold);
+gov_attr_rw(ignore_nice_load);
+gov_attr_rw(down_threshold);
+gov_attr_rw(freq_step);
+
+static struct attribute *cs_attrs[] = {
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice_load.attr,
+ &freq_step.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(cs);
+
+/************************** sysfs end ************************/
+
+static struct policy_dbs_info *cs_alloc(void)
+{
+ struct cs_policy_dbs_info *dbs_info;
+
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
+
+static void cs_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
+
+static int cs_init(struct dbs_data *dbs_data)
+{
+ struct cs_dbs_tuners *tuners;
+
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
+ if (!tuners)
+ return -ENOMEM;
+
+ tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+ tuners->freq_step = DEF_FREQUENCY_STEP;
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
+ dbs_data->tuners = tuners;
+
+ return 0;
+}
+
+static void cs_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
+static void cs_start(struct cpufreq_policy *policy)
+{
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
+}
+
+static struct dbs_governor cs_governor = {
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
+ .kobj_type = { .default_groups = cs_groups },
+ .gov_dbs_update = cs_dbs_update,
+ .alloc = cs_alloc,
+ .free = cs_free,
+ .init = cs_init,
+ .exit = cs_exit,
+ .start = cs_start,
+};
+
+#define CPU_FREQ_GOV_CONSERVATIVE (cs_governor.gov)
+
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors "
+ "optimised for use in a battery environment");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &CPU_FREQ_GOV_CONSERVATIVE;
+}
+#endif
+
+cpufreq_governor_init(CPU_FREQ_GOV_CONSERVATIVE);
+cpufreq_governor_exit(CPU_FREQ_GOV_CONSERVATIVE);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
new file mode 100644
index 000000000..85da677c4
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/cpufreq/cpufreq_governor.c
+ *
+ * CPUFREQ governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+
+#include "cpufreq_governor.h"
+
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
+
+static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
+
+static DEFINE_MUTEX(gov_dbs_data_mutex);
+
+/* Common sysfs tunables */
+/*
+ * sampling_rate_store - update sampling rate effective immediately if needed.
+ *
+ * If the new rate is smaller than the old one, simply updating
+ * dbs_data->sampling_rate might not be enough. For example, if the original
+ * sampling_rate was 1 second and the requested new rate is 10 ms because the
+ * user needs an immediate reaction from the ondemand governor but is not sure
+ * whether a higher frequency will be required, the governor may pick up the
+ * new rate too late; up to 1 second later. Thus, if we are reducing the
+ * sampling rate, we need to make the new value effective immediately.
+ *
+ * This must be called with dbs_data->mutex held, otherwise traversing
+ * policy_dbs_list isn't safe.
+ */
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct policy_dbs_info *policy_dbs;
+ unsigned int sampling_interval;
+ int ret;
+
+ ret = sscanf(buf, "%u", &sampling_interval);
+ if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
+ return -EINVAL;
+
+ dbs_data->sampling_rate = sampling_interval;
+
+ /*
+ * We are operating under dbs_data->mutex and so the list and its
+ * entries can't be freed concurrently.
+ */
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
+ mutex_lock(&policy_dbs->update_mutex);
+ /*
+ * On 32-bit architectures this may race with the
+ * sample_delay_ns read in dbs_update_util_handler(), but that
+ * really doesn't matter. If the read returns a value that's
+ * too big, the sample will be skipped, but the next invocation
+ * of dbs_update_util_handler() (when the update has been
+ * completed) will take a sample.
+ *
+ * If this runs in parallel with dbs_work_handler(), we may end
+ * up overwriting the sample_delay_ns value that it has just
+ * written, but it will be corrected next time a sample is
+ * taken, so it shouldn't be significant.
+ */
+ gov_update_sample_delay(policy_dbs, 0);
+ mutex_unlock(&policy_dbs->update_mutex);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(sampling_rate_store);
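+
+/*
+ * For reference, this tunable is exposed through sysfs. With system-wide
+ * governor tunables it appears as, for example,
+ * /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate (in microseconds);
+ * it moves under the policy directory when the driver sets
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY.
+ */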
+
+/**
+ * gov_update_cpu_data - Update CPU load data.
+ * @dbs_data: Top-level governor data pointer.
+ *
+ * Update CPU load data for all CPUs in the domain governed by @dbs_data
+ * (that may be a single policy or a bunch of them if governor tunables are
+ * system-wide).
+ *
+ * Call under the @dbs_data mutex.
+ */
+void gov_update_cpu_data(struct dbs_data *dbs_data)
+{
+ struct policy_dbs_info *policy_dbs;
+
+ list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
+ unsigned int j;
+
+ for_each_cpu(j, policy_dbs->policy->cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
+ dbs_data->io_is_busy);
+ if (dbs_data->ignore_nice_load)
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(gov_update_cpu_data);
+
+unsigned int dbs_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int ignore_nice = dbs_data->ignore_nice_load;
+ unsigned int max_load = 0, idle_periods = UINT_MAX;
+ unsigned int sampling_rate, io_busy, j;
+
+ /*
+ * Sometimes governors may use an additional multiplier to increase
+ * sample delays temporarily. Apply that multiplier to sampling_rate
+ * so as to keep the wake-up-from-idle detection logic a bit
+ * conservative.
+ */
+ sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an indication
+ * that you're performance critical, and not that the system is actually
+ * idle, so do not add the iowait time to the CPU idle time then.
+ */
+ io_busy = dbs_data->io_is_busy;
+
+ /* Get Absolute Load */
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+ u64 update_time, cur_idle_time;
+ unsigned int idle_time, time_elapsed;
+ unsigned int load;
+
+ cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
+
+ time_elapsed = update_time - j_cdbs->prev_update_time;
+ j_cdbs->prev_update_time = update_time;
+
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
+
+ if (ignore_nice) {
+ u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
+
+ idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
+ j_cdbs->prev_cpu_nice = cur_nice;
+ }
+
+ if (unlikely(!time_elapsed)) {
+ /*
+ * That can only happen when this function is called
+ * twice in a row with a very short interval between the
+ * calls, so the previous load value can be used then.
+ */
+ load = j_cdbs->prev_load;
+ } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
+ /*
+ * If the CPU had gone completely idle and a task has
+ * just woken up on this CPU now, it would be unfair to
+ * calculate 'load' the usual way for this elapsed
+ * time-window, because it would show near-zero load,
+ * irrespective of how CPU intensive that task actually
+ * was. This is undesirable for latency-sensitive bursty
+ * workloads.
+ *
+ * To avoid this, reuse the 'load' from the previous
+ * time-window and give this task a chance to start with
+ * a reasonably high CPU frequency. However, that
+ * shouldn't be over-done, lest we get stuck at a high
+ * load (high frequency) for too long, even when the
+ * current system load has actually dropped down, so
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+ * Detecting this situation is easy: an unusually large
+ * 'idle_time' (as compared to the sampling rate)
+ * indicates this scenario.
+ */
+ load = j_cdbs->prev_load;
+ j_cdbs->prev_load = 0;
+ } else {
+ if (time_elapsed >= idle_time) {
+ load = 100 * (time_elapsed - idle_time) / time_elapsed;
+ } else {
+ /*
+ * That can happen if idle_time is returned by
+ * get_cpu_idle_time_jiffy(). In that case
+ * idle_time is roughly equal to the difference
+ * between time_elapsed and "busy time" obtained
+ * from CPU statistics. Then, the "busy time"
+ * can end up being greater than time_elapsed
+ * (for example, if jiffies_64 and the CPU
+ * statistics are updated by different CPUs),
+ * so idle_time may in fact be negative. That
+ * means, though, that the CPU was busy all
+ * the time (on the rough average) during the
+ * last sampling interval and 100 can be
+ * returned as the load.
+ */
+ load = (int)idle_time < 0 ? 100 : 0;
+ }
+ j_cdbs->prev_load = load;
+ }
+
+ if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ unsigned int periods = idle_time / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
+ }
+
+ if (load > max_load)
+ max_load = load;
+ }
+
+ policy_dbs->idle_periods = idle_periods;
+
+ return max_load;
+}
+EXPORT_SYMBOL_GPL(dbs_update);
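+
+/*
+ * Worked example of the load computation above: if time_elapsed is 10000 us
+ * and idle_time is 2500 us on a given CPU, its load is
+ * 100 * (10000 - 2500) / 10000 = 75; dbs_update() returns the largest such
+ * load across policy->cpus.
+ */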
+
+static void dbs_work_handler(struct work_struct *work)
+{
+ struct policy_dbs_info *policy_dbs;
+ struct cpufreq_policy *policy;
+ struct dbs_governor *gov;
+
+ policy_dbs = container_of(work, struct policy_dbs_info, work);
+ policy = policy_dbs->policy;
+ gov = dbs_governor_of(policy);
+
+ /*
+ * Make sure cpufreq_governor_limits() isn't evaluating load or the
+ * ondemand governor isn't updating the sampling rate in parallel.
+ */
+ mutex_lock(&policy_dbs->update_mutex);
+ gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
+ mutex_unlock(&policy_dbs->update_mutex);
+
+ /* Allow the utilization update handler to queue up more work. */
+ atomic_set(&policy_dbs->work_count, 0);
+ /*
+ * If the update below is reordered with respect to the sample delay
+ * modification, the utilization update handler may end up using a stale
+ * sample delay value.
+ */
+ smp_wmb();
+ policy_dbs->work_in_progress = false;
+}
+
+static void dbs_irq_work(struct irq_work *irq_work)
+{
+ struct policy_dbs_info *policy_dbs;
+
+ policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
+ schedule_work_on(smp_processor_id(), &policy_dbs->work);
+}
+
+static void dbs_update_util_handler(struct update_util_data *data, u64 time,
+ unsigned int flags)
+{
+ struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
+ struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+ u64 delta_ns, lst;
+
+ if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
+ return;
+
+ /*
+ * The work may not be allowed to be queued up right now.
+ * Possible reasons:
+ * - Work has already been queued up or is in progress.
+ * - It is too early (too little time from the previous sample).
+ */
+ if (policy_dbs->work_in_progress)
+ return;
+
+ /*
+ * If the reads below are reordered before the check above, the value
+ * of sample_delay_ns used in the computation may be stale.
+ */
+ smp_rmb();
+ lst = READ_ONCE(policy_dbs->last_sample_time);
+ delta_ns = time - lst;
+ if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+ return;
+
+ /*
+ * If the policy is not shared, the irq_work may be queued up right away
+ * at this point. Otherwise, we need to ensure that only one of the
+ * CPUs sharing the policy will do that.
+ */
+ if (policy_dbs->is_shared) {
+ if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
+ return;
+
+ /*
+ * If another CPU updated last_sample_time in the meantime, we
+ * shouldn't be here, so clear the work counter and bail out.
+ */
+ if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
+ atomic_set(&policy_dbs->work_count, 0);
+ return;
+ }
+ }
+
+ policy_dbs->last_sample_time = time;
+ policy_dbs->work_in_progress = true;
+ irq_work_queue(&policy_dbs->irq_work);
+}
+
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ struct cpufreq_policy *policy = policy_dbs->policy;
+ int cpu;
+
+ gov_update_sample_delay(policy_dbs, delay_us);
+ policy_dbs->last_sample_time = 0;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+ cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+ dbs_update_util_handler);
+ }
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+ int i;
+
+ for_each_cpu(i, policy->cpus)
+ cpufreq_remove_update_util_hook(i);
+
+ synchronize_rcu();
+}
+
+static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
+ struct dbs_governor *gov)
+{
+ struct policy_dbs_info *policy_dbs;
+ int j;
+
+ /* Allocate memory for per-policy governor data. */
+ policy_dbs = gov->alloc();
+ if (!policy_dbs)
+ return NULL;
+
+ policy_dbs->policy = policy;
+ mutex_init(&policy_dbs->update_mutex);
+ atomic_set(&policy_dbs->work_count, 0);
+ init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
+ INIT_WORK(&policy_dbs->work, dbs_work_handler);
+
+ /* Set policy_dbs for all CPUs, online+offline */
+ for_each_cpu(j, policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->policy_dbs = policy_dbs;
+ }
+ return policy_dbs;
+}
+
+static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
+ struct dbs_governor *gov)
+{
+ int j;
+
+ mutex_destroy(&policy_dbs->update_mutex);
+
+ for_each_cpu(j, policy_dbs->policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->policy_dbs = NULL;
+ j_cdbs->update_util.func = NULL;
+ }
+ gov->free(policy_dbs);
+}
+
+static void cpufreq_dbs_data_release(struct kobject *kobj)
+{
+ struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
+ struct dbs_governor *gov = dbs_data->gov;
+
+ gov->exit(dbs_data);
+ kfree(dbs_data);
+}
+
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
+{
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct dbs_data *dbs_data;
+ struct policy_dbs_info *policy_dbs;
+ int ret = 0;
+
+ /* State should be equivalent to EXIT */
+ if (policy->governor_data)
+ return -EBUSY;
+
+ policy_dbs = alloc_policy_dbs_info(policy, gov);
+ if (!policy_dbs)
+ return -ENOMEM;
+
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+
+ dbs_data = gov->gdbs_data;
+ if (dbs_data) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto free_policy_dbs_info;
+ }
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
+
+ gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
+ goto out;
+ }
+
+ dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+ if (!dbs_data) {
+ ret = -ENOMEM;
+ goto free_policy_dbs_info;
+ }
+
+ dbs_data->gov = gov;
+ gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
+
+ ret = gov->init(dbs_data);
+ if (ret)
+ goto free_policy_dbs_info;
+
+ /*
+ * The sampling interval should not be less than the transition latency
+ * of the CPU and it also cannot be too small for dbs_update() to work
+ * correctly.
+ */
+ dbs_data->sampling_rate = max_t(unsigned int,
+ CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+ cpufreq_policy_transition_delay_us(policy));
+
+ if (!have_governor_per_policy())
+ gov->gdbs_data = dbs_data;
+
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
+
+ gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
+ gov->kobj_type.release = cpufreq_dbs_data_release;
+ ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
+ get_governor_parent_kobj(policy),
+ "%s", gov->gov.name);
+ if (!ret)
+ goto out;
+
+ /* Failure, so roll back. */
+ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+
+ kobject_put(&dbs_data->attr_set.kobj);
+
+ policy->governor_data = NULL;
+
+ if (!have_governor_per_policy())
+ gov->gdbs_data = NULL;
+ gov->exit(dbs_data);
+ kfree(dbs_data);
+
+free_policy_dbs_info:
+ free_policy_dbs_info(policy_dbs, gov);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
+
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
+{
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int count;
+
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+
+ count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
+
+ policy->governor_data = NULL;
+
+ if (!count && !have_governor_per_policy())
+ gov->gdbs_data = NULL;
+
+ free_policy_dbs_info(policy_dbs, gov);
+
+ mutex_unlock(&gov_dbs_data_mutex);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
+
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
+{
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int sampling_rate, ignore_nice, j;
+ unsigned int io_busy;
+
+ if (!policy->cur)
+ return -EINVAL;
+
+ policy_dbs->is_shared = policy_is_shared(policy);
+ policy_dbs->rate_mult = 1;
+
+ sampling_rate = dbs_data->sampling_rate;
+ ignore_nice = dbs_data->ignore_nice_load;
+ io_busy = dbs_data->io_is_busy;
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
+ /*
+ * Make the first invocation of dbs_update() compute the load.
+ */
+ j_cdbs->prev_load = 0;
+
+ if (ignore_nice)
+ j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
+ }
+
+ gov->start(policy);
+
+ gov_set_update_util(policy_dbs, sampling_rate);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
+
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
+}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
+
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs;
+
+ /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+ mutex_lock(&gov_dbs_data_mutex);
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
+ goto out;
+
+ mutex_lock(&policy_dbs->update_mutex);
+ cpufreq_policy_apply_limits(policy);
+ gov_update_sample_delay(policy_dbs, 0);
+ mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
new file mode 100644
index 000000000..168c23fd7
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * drivers/cpufreq/cpufreq_governor.h
+ *
+ * Header file for CPUFreq governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
+
+#include <linux/atomic.h>
+#include <linux/irq_work.h>
+#include <linux/cpufreq.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/* Ondemand Sampling types */
+enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
+
+/*
+ * Abbreviations:
+ * dbs: used as a shortform for demand based switching. It helps to keep
+ * variable names smaller and simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
+ */
+
+/* Governor demand based switching data (per-policy or global). */
+struct dbs_data {
+ struct gov_attr_set attr_set;
+ struct dbs_governor *gov;
+ void *tuners;
+ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int io_is_busy;
+};
+
+static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
+{
+ return container_of(attr_set, struct dbs_data, attr_set);
+}
+
+#define gov_show_one(_gov, file_name) \
+static ssize_t file_name##_show \
+(struct gov_attr_set *attr_set, char *buf) \
+{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
+ struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
+ return sprintf(buf, "%u\n", tuners->file_name); \
+}
+
+#define gov_show_one_common(file_name) \
+static ssize_t file_name##_show \
+(struct gov_attr_set *attr_set, char *buf) \
+{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
+ return sprintf(buf, "%u\n", dbs_data->file_name); \
+}
+
+#define gov_attr_ro(_name) \
+static struct governor_attr _name = __ATTR_RO(_name)
+
+#define gov_attr_rw(_name) \
+static struct governor_attr _name = __ATTR_RW(_name)
+
+/* Common to all CPUs of a policy */
+struct policy_dbs_info {
+ struct cpufreq_policy *policy;
+ /*
+ * Per policy mutex that serializes load evaluation from limit-change
+ * and work-handler.
+ */
+ struct mutex update_mutex;
+
+ u64 last_sample_time;
+ s64 sample_delay_ns;
+ atomic_t work_count;
+ struct irq_work irq_work;
+ struct work_struct work;
+ /* dbs_data may be shared between multiple policy objects */
+ struct dbs_data *dbs_data;
+ struct list_head list;
+ /* Multiplier for increasing sample delay temporarily. */
+ unsigned int rate_mult;
+ unsigned int idle_periods; /* For conservative */
+ /* Status indicators */
+ bool is_shared; /* This object is used by multiple CPUs */
+ bool work_in_progress; /* Work is being queued up or in progress */
+};
+
+static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
+}
+
+/* Per cpu structures */
+struct cpu_dbs_info {
+ u64 prev_cpu_idle;
+ u64 prev_update_time;
+ u64 prev_cpu_nice;
+ /*
+ * Used to keep track of load in the previous interval. However, when
+ * explicitly set to zero, it is used as a flag to ensure that we copy
+ * the previous load to the current interval only once, upon the first
+ * wake-up from idle.
+ */
+ unsigned int prev_load;
+ struct update_util_data update_util;
+ struct policy_dbs_info *policy_dbs;
+};
+
+/* Common Governor data across policies */
+struct dbs_governor {
+ struct cpufreq_governor gov;
+ struct kobj_type kobj_type;
+
+ /*
+ * Common data for platforms that don't set
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+ */
+ struct dbs_data *gdbs_data;
+
+ unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
+ struct policy_dbs_info *(*alloc)(void);
+ void (*free)(struct policy_dbs_info *policy_dbs);
+ int (*init)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data);
+ void (*start)(struct cpufreq_policy *policy);
+};
+
+static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
+{
+ return container_of(policy->governor, struct dbs_governor, gov);
+}
+
+/* Governor callback routines */
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+
+#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
+ { \
+ .name = _name_, \
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \
+ .owner = THIS_MODULE, \
+ .init = cpufreq_dbs_governor_init, \
+ .exit = cpufreq_dbs_governor_exit, \
+ .start = cpufreq_dbs_governor_start, \
+ .stop = cpufreq_dbs_governor_stop, \
+ .limits = cpufreq_dbs_governor_limits, \
+ }
+
+/* Governor specific operations */
+struct od_ops {
+ unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation);
+};
+
+unsigned int dbs_update(struct cpufreq_policy *policy);
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias);
+void od_unregister_powersave_bias_handler(void);
+ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
+ size_t count);
+void gov_update_cpu_data(struct dbs_data *dbs_data);
+#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 000000000..771770ea0
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret;
+
+ mutex_lock(&attr_set->update_lock);
+ ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+ mutex_unlock(&attr_set->update_lock);
+ return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ INIT_LIST_HEAD(&attr_set->policy_list);
+ mutex_init(&attr_set->update_lock);
+ attr_set->usage_count = 1;
+ list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ mutex_lock(&attr_set->update_lock);
+ attr_set->usage_count++;
+ list_add(list_node, &attr_set->policy_list);
+ mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ unsigned int count;
+
+ mutex_lock(&attr_set->update_lock);
+ list_del(list_node);
+ count = --attr_set->usage_count;
+ mutex_unlock(&attr_set->update_lock);
+ if (count)
+ return count;
+
+ mutex_destroy(&attr_set->update_lock);
+ kobject_put(&attr_set->kobj);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
new file mode 100644
index 000000000..c52d19d67
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/cpufreq/cpufreq_ondemand.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/sched/cpufreq.h>
+
+#include "cpufreq_ondemand.h"
+
+/* On-demand governor macros */
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (100000)
+#define MICRO_FREQUENCY_UP_THRESHOLD (95)
+#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
+#define MIN_FREQUENCY_UP_THRESHOLD (1)
+#define MAX_FREQUENCY_UP_THRESHOLD (100)
+
+static struct od_ops od_ops;
+
+static unsigned int default_powersave_bias;
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, enable it by default only for specific known CPU series, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * Find the right frequency to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and sets freq_hi_delay_us,
+ * freq_lo, and freq_lo_delay_us in the per-CPU area for averaging frequencies.
+ */
+static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation)
+{
+ unsigned int freq_req, freq_reduc, freq_avg;
+ unsigned int freq_hi, freq_lo;
+ unsigned int index;
+ unsigned int delay_hi_us;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
+
+ if (!freq_table) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_delay_us = 0;
+ return freq_next;
+ }
+
+ index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ freq_req = freq_table[index].frequency;
+ freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
+ freq_avg = freq_req - freq_reduc;
+
+ /* Find freq bounds for freq_avg in freq_table */
+ index = cpufreq_table_find_index_h(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
+ freq_lo = freq_table[index].frequency;
+ index = cpufreq_table_find_index_l(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
+ freq_hi = freq_table[index].frequency;
+
+ /* Find out how long we have to be in hi and lo freqs */
+ if (freq_hi == freq_lo) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_delay_us = 0;
+ return freq_lo;
+ }
+ delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
+ delay_hi_us += (freq_hi - freq_lo) / 2;
+ delay_hi_us /= freq_hi - freq_lo;
+ dbs_info->freq_hi_delay_us = delay_hi_us;
+ dbs_info->freq_lo = freq_lo;
+ dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
+ return freq_hi;
+}
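+
+/*
+ * Example of the bias arithmetic above: with powersave_bias = 100 (i.e. 10%,
+ * as the tunable is in units of 0.1%) and a requested frequency of
+ * 2000000 kHz, freq_reduc is 200000 kHz and freq_avg is 1800000 kHz; the
+ * governor then alternates between the table frequencies bracketing that
+ * average, spending freq_hi_delay_us of each sampling period at the higher one.
+ */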
+
+static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
+{
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->freq_lo = 0;
+}
+
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ if (od_tuners->powersave_bias)
+ freq = od_ops.powersave_bias_target(policy, freq,
+ CPUFREQ_RELATION_HE);
+ else if (policy->cur == policy->max)
+ return;
+
+ __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
+ CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
+}
+
+/*
+ * Every sampling_rate, we check if the current idle time is less than 20%
+ * (default); if it is, we try to increase the frequency. Otherwise, we adjust
+ * the frequency proportionally to the load.
+ */
+static void od_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
+
+ dbs_info->freq_lo = 0;
+
+ /* Check for frequency increase */
+ if (load > dbs_data->up_threshold) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ policy_dbs->rate_mult = dbs_data->sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
+ } else {
+ /* Calculate the next frequency proportional to load */
+ unsigned int freq_next, min_f, max_f;
+
+ min_f = policy->cpuinfo.min_freq;
+ max_f = policy->cpuinfo.max_freq;
+ freq_next = min_f + load * (max_f - min_f) / 100;
+
+ /* No longer fully busy, reset rate_mult */
+ policy_dbs->rate_mult = 1;
+
+ if (od_tuners->powersave_bias)
+ freq_next = od_ops.powersave_bias_target(policy,
+ freq_next,
+ CPUFREQ_RELATION_LE);
+
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
+ }
+}
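+
+/*
+ * Example of the proportional step above: with cpuinfo.min_freq of 800000 kHz,
+ * cpuinfo.max_freq of 2400000 kHz and a load of 50, freq_next is
+ * 800000 + 50 * (2400000 - 800000) / 100 = 1600000 kHz.
+ */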
+
+static unsigned int od_dbs_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ int sample_type = dbs_info->sample_type;
+
+ /* Common NORMAL_SAMPLE setup */
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ /*
+ * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
+ * it then.
+ */
+ if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
+ __cpufreq_driver_target(policy, dbs_info->freq_lo,
+ CPUFREQ_RELATION_HE);
+ return dbs_info->freq_lo_delay_us;
+ }
+
+ od_update(policy);
+
+ if (dbs_info->freq_lo) {
+ /* Setup SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ return dbs_info->freq_hi_delay_us;
+ }
+
+ return dbs_data->sampling_rate * policy_dbs->rate_mult;
+}
+
+/************************** sysfs interface ************************/
+static struct dbs_governor od_dbs_gov;
+
+static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ dbs_data->io_is_busy = !!input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ gov_update_cpu_data(dbs_data);
+
+ return count;
+}
+
+static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+
+ dbs_data->up_threshold = input;
+ return count;
+}
+
+static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct policy_dbs_info *policy_dbs;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
+ dbs_data->sampling_down_factor = input;
+
+ /* Reset down sampling multiplier in case it was active */
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
+ /*
+ * Doing this without locking might lead to using different
+ * rate_mult values in od_update() and od_dbs_update().
+ */
+ mutex_lock(&policy_dbs->update_mutex);
+ policy_dbs->rate_mult = 1;
+ mutex_unlock(&policy_dbs->update_mutex);
+ }
+
+ return count;
+}
+
+static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ if (input == dbs_data->ignore_nice_load) { /* nothing to do */
+ return count;
+ }
+ dbs_data->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ gov_update_cpu_data(dbs_data);
+
+ return count;
+}
+
+static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct policy_dbs_info *policy_dbs;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1000)
+ input = 1000;
+
+ od_tuners->powersave_bias = input;
+
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
+ ondemand_powersave_bias_init(policy_dbs->policy);
+
+ return count;
+}
+
+gov_show_one_common(sampling_rate);
+gov_show_one_common(up_threshold);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(io_is_busy);
+gov_show_one(od, powersave_bias);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(io_is_busy);
+gov_attr_rw(up_threshold);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(ignore_nice_load);
+gov_attr_rw(powersave_bias);
+
+static struct attribute *od_attrs[] = {
+ &sampling_rate.attr,
+ &up_threshold.attr,
+ &sampling_down_factor.attr,
+ &ignore_nice_load.attr,
+ &powersave_bias.attr,
+ &io_is_busy.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(od);
+
+/************************** sysfs end ************************/
+
+static struct policy_dbs_info *od_alloc(void)
+{
+ struct od_policy_dbs_info *dbs_info;
+
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
+
+static void od_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
+
+static int od_init(struct dbs_data *dbs_data)
+{
+ struct od_dbs_tuners *tuners;
+ u64 idle_time;
+ int cpu;
+
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
+ if (!tuners)
+ return -ENOMEM;
+
+ cpu = get_cpu();
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ /* Idle micro accounting is supported. Use finer thresholds */
+ dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ } else {
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ }
+
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
+ tuners->powersave_bias = default_powersave_bias;
+ dbs_data->io_is_busy = should_io_be_busy();
+
+ dbs_data->tuners = tuners;
+ return 0;
+}
+
+static void od_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
+static void od_start(struct cpufreq_policy *policy)
+{
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ ondemand_powersave_bias_init(policy);
+}
+
+static struct od_ops od_ops = {
+ .powersave_bias_target = generic_powersave_bias_target,
+};
+
+static struct dbs_governor od_dbs_gov = {
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
+ .kobj_type = { .default_groups = od_groups },
+ .gov_dbs_update = od_dbs_update,
+ .alloc = od_alloc,
+ .free = od_free,
+ .init = od_init,
+ .exit = od_exit,
+ .start = od_start,
+};
+
+#define CPU_FREQ_GOV_ONDEMAND (od_dbs_gov.gov)
+
+static void od_set_powersave_bias(unsigned int powersave_bias)
+{
+ unsigned int cpu;
+ cpumask_var_t done;
+
+ if (!alloc_cpumask_var(&done, GFP_KERNEL))
+ return;
+
+ default_powersave_bias = powersave_bias;
+ cpumask_clear(done);
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ struct policy_dbs_info *policy_dbs;
+ struct dbs_data *dbs_data;
+ struct od_dbs_tuners *od_tuners;
+
+ if (cpumask_test_cpu(cpu, done))
+ continue;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
+ continue;
+
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
+ continue;
+
+ cpumask_or(done, done, policy->cpus);
+
+ dbs_data = policy_dbs->dbs_data;
+ od_tuners = dbs_data->tuners;
+ od_tuners->powersave_bias = default_powersave_bias;
+ }
+ cpus_read_unlock();
+
+ free_cpumask_var(done);
+}
+
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias)
+{
+ od_ops.powersave_bias_target = f;
+ od_set_powersave_bias(powersave_bias);
+}
+EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+
+void od_unregister_powersave_bias_handler(void)
+{
+ od_ops.powersave_bias_target = generic_powersave_bias_target;
+ od_set_powersave_bias(0);
+}
+EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
+
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &CPU_FREQ_GOV_ONDEMAND;
+}
+#endif
+
+cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
+cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
new file mode 100644
index 000000000..1af8e5c4b
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Header file for CPUFreq ondemand governor and related code.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include "cpufreq_governor.h"
+
+struct od_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ unsigned int freq_lo;
+ unsigned int freq_lo_delay_us;
+ unsigned int freq_hi_delay_us;
+ unsigned int sample_type:1;
+};
+
+static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct od_policy_dbs_info, policy_dbs);
+}
+
+struct od_dbs_tuners {
+ unsigned int powersave_bias;
+};
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
new file mode 100644
index 000000000..addd93f2a
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/cpufreq/cpufreq_performance.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
+{
+ pr_debug("setting to %u kHz\n", policy->max);
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+}
+
+static struct cpufreq_governor cpufreq_gov_performance = {
+ .name = "performance",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
+ .limits = cpufreq_gov_performance_limits,
+};
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+#ifndef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
+MODULE_LICENSE("GPL");
+
+cpufreq_governor_init(cpufreq_gov_performance);
+cpufreq_governor_exit(cpufreq_gov_performance);
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
new file mode 100644
index 000000000..8d830d860
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/cpufreq/cpufreq_powersave.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
+{
+ pr_debug("setting to %u kHz\n", policy->min);
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+}
+
+static struct cpufreq_governor cpufreq_gov_powersave = {
+ .name = "powersave",
+ .limits = cpufreq_gov_powersave_limits,
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
+};
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_powersave;
+}
+#endif
+
+cpufreq_governor_init(cpufreq_gov_powersave);
+cpufreq_governor_exit(cpufreq_gov_powersave);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
new file mode 100644
index 000000000..6e57df7a2
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/cpufreq/cpufreq_stats.c
+ *
+ * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <linux/slab.h>
+
+struct cpufreq_stats {
+ unsigned int total_trans;
+ unsigned long long last_time;
+ unsigned int max_state;
+ unsigned int state_num;
+ unsigned int last_index;
+ u64 *time_in_state;
+ unsigned int *freq_table;
+ unsigned int *trans_table;
+
+ /* Deferred reset */
+ unsigned int reset_pending;
+ unsigned long long reset_time;
+};
+
+static void cpufreq_stats_update(struct cpufreq_stats *stats,
+ unsigned long long time)
+{
+ unsigned long long cur_time = local_clock();
+
+ stats->time_in_state[stats->last_index] += cur_time - time;
+ stats->last_time = cur_time;
+}
+
+static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
+{
+ unsigned int count = stats->max_state;
+
+ memset(stats->time_in_state, 0, count * sizeof(u64));
+ memset(stats->trans_table, 0, count * count * sizeof(int));
+ stats->last_time = local_clock();
+ stats->total_trans = 0;
+
+ /* Adjust for the time elapsed since reset was requested */
+ WRITE_ONCE(stats->reset_pending, 0);
+ /*
+ * Prevent the reset_time read from being reordered before the
+ * reset_pending accesses in cpufreq_stats_record_transition().
+ */
+ smp_rmb();
+ cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
+}
+
+static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpufreq_stats *stats = policy->stats;
+
+ if (READ_ONCE(stats->reset_pending))
+ return sprintf(buf, "%d\n", 0);
+ else
+ return sprintf(buf, "%u\n", stats->total_trans);
+}
+cpufreq_freq_attr_ro(total_trans);
+
+static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpufreq_stats *stats = policy->stats;
+ bool pending = READ_ONCE(stats->reset_pending);
+ unsigned long long time;
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < stats->state_num; i++) {
+ if (pending) {
+ if (i == stats->last_index) {
+ /*
+ * Prevent the reset_time read from occurring
+ * before the reset_pending read above.
+ */
+ smp_rmb();
+ time = local_clock() - READ_ONCE(stats->reset_time);
+ } else {
+ time = 0;
+ }
+ } else {
+ time = stats->time_in_state[i];
+ if (i == stats->last_index)
+ time += local_clock() - stats->last_time;
+ }
+
+ len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
+ nsec_to_clock_t(time));
+ }
+ return len;
+}
+cpufreq_freq_attr_ro(time_in_state);
+
+/* We don't care what is written to the attribute */
+static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ struct cpufreq_stats *stats = policy->stats;
+
+ /*
+ * Defer resetting of stats to cpufreq_stats_record_transition() to
+ * avoid races.
+ */
+ WRITE_ONCE(stats->reset_time, local_clock());
+ /*
+ * The memory barrier below is to prevent the readers of reset_time from
+ * seeing a stale or partially updated value.
+ */
+ smp_wmb();
+ WRITE_ONCE(stats->reset_pending, 1);
+
+ return count;
+}
+cpufreq_freq_attr_wo(reset);
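The deferred reset relies on a store/load barrier pairing: the writer publishes reset_time before reset_pending, and every reader that sees reset_pending set must order its reset_time load after that check. A condensed sketch of the protocol the functions above and below depend on (hypothetical helper names, illustrative only):

static void stats_publish_reset(struct cpufreq_stats *stats)
{
	WRITE_ONCE(stats->reset_time, local_clock());
	smp_wmb();	/* order the reset_time store before the flag */
	WRITE_ONCE(stats->reset_pending, 1);
}

static u64 stats_consume_reset(struct cpufreq_stats *stats)
{
	if (!READ_ONCE(stats->reset_pending))
		return 0;
	smp_rmb();	/* pairs with smp_wmb() in stats_publish_reset() */
	return READ_ONCE(stats->reset_time);
}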
+
+static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpufreq_stats *stats = policy->stats;
+ bool pending = READ_ONCE(stats->reset_pending);
+ ssize_t len = 0;
+ int i, j, count;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stats->state_num; i++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ stats->freq_table[i]);
+ }
+ if (len >= PAGE_SIZE - 1)
+ return PAGE_SIZE - 1;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ for (i = 0; i < stats->state_num; i++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
+ stats->freq_table[i]);
+
+ for (j = 0; j < stats->state_num; j++) {
+ if (len >= PAGE_SIZE - 1)
+ break;
+
+ if (pending)
+ count = 0;
+ else
+ count = stats->trans_table[i * stats->max_state + j];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
+ }
+ if (len >= PAGE_SIZE - 1)
+ break;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+
+ if (len >= PAGE_SIZE - 1) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
+ return len;
+}
+cpufreq_freq_attr_ro(trans_table);
+
+static struct attribute *default_attrs[] = {
+ &total_trans.attr,
+ &time_in_state.attr,
+ &reset.attr,
+ &trans_table.attr,
+ NULL
+};
+static const struct attribute_group stats_attr_group = {
+ .attrs = default_attrs,
+ .name = "stats"
+};
+
+static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
+{
+ int index;
+ for (index = 0; index < stats->max_state; index++)
+ if (stats->freq_table[index] == freq)
+ return index;
+ return -1;
+}
+
+void cpufreq_stats_free_table(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stats = policy->stats;
+
+ /* Already freed */
+ if (!stats)
+ return;
+
+ pr_debug("%s: Free stats table\n", __func__);
+
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ kfree(stats->time_in_state);
+ kfree(stats);
+ policy->stats = NULL;
+}
+
+void cpufreq_stats_create_table(struct cpufreq_policy *policy)
+{
+ unsigned int i = 0, count;
+ struct cpufreq_stats *stats;
+ unsigned int alloc_size;
+ struct cpufreq_frequency_table *pos;
+
+ count = cpufreq_table_count_valid_entries(policy);
+ if (!count)
+ return;
+
+ /* stats already initialized */
+ if (policy->stats)
+ return;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return;
+
+ alloc_size = count * sizeof(int) + count * sizeof(u64);
+
+ alloc_size += count * count * sizeof(int);
+
+ /* Allocate memory for time_in_state/freq_table/trans_table in one go */
+ stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+ if (!stats->time_in_state)
+ goto free_stat;
+
+ stats->freq_table = (unsigned int *)(stats->time_in_state + count);
+
+ stats->trans_table = stats->freq_table + count;
+
+ stats->max_state = count;
+
+ /* Find valid-unique entries */
+ cpufreq_for_each_valid_entry(pos, policy->freq_table)
+ if (freq_table_get_index(stats, pos->frequency) == -1)
+ stats->freq_table[i++] = pos->frequency;
+
+ stats->state_num = i;
+ stats->last_time = local_clock();
+ stats->last_index = freq_table_get_index(stats, policy->cur);
+
+ policy->stats = stats;
+ if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
+ return;
+
+ /* We failed, release resources */
+ policy->stats = NULL;
+ kfree(stats->time_in_state);
+free_stat:
+ kfree(stats);
+}
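The three per-policy arrays above share a single allocation: count u64 time_in_state slots, then count freq_table entries, then the count x count transition matrix. An illustrative accessor showing how one matrix cell is addressed within that flat buffer (hypothetical helper, not part of the patch):

static unsigned int stats_trans_count(struct cpufreq_stats *stats,
				      unsigned int old_index,
				      unsigned int new_index)
{
	/* Row = previous frequency index, column = new frequency index. */
	return stats->trans_table[old_index * stats->max_state + new_index];
}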
+
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq)
+{
+ struct cpufreq_stats *stats = policy->stats;
+ int old_index, new_index;
+
+ if (unlikely(!stats))
+ return;
+
+ if (unlikely(READ_ONCE(stats->reset_pending)))
+ cpufreq_stats_reset_table(stats);
+
+ old_index = stats->last_index;
+ new_index = freq_table_get_index(stats, new_freq);
+
+ /* We can't do stats->time_in_state[-1]= .. */
+ if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
+ return;
+
+ cpufreq_stats_update(stats, stats->last_time);
+
+ stats->last_index = new_index;
+ stats->trans_table[old_index * stats->max_state + new_index]++;
+ stats->total_trans++;
+}
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
new file mode 100644
index 000000000..50a4d7846
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * linux/drivers/cpufreq/cpufreq_userspace.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
+static DEFINE_MUTEX(userspace_mutex);
+
+/**
+ * cpufreq_set - set the CPU frequency
+ * @policy: pointer to policy struct where freq is being set
+ * @freq: target frequency in kHz
+ *
+ * Sets the CPU frequency to freq.
+ */
+static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
+{
+ int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;
+
+ pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
+
+ mutex_lock(&userspace_mutex);
+ if (!per_cpu(cpu_is_managed, policy->cpu))
+ goto err;
+
+ *setspeed = freq;
+
+ ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
+ err:
+ mutex_unlock(&userspace_mutex);
+ return ret;
+}
+
+static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", policy->cur);
+}
+
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
+static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
+{
+ mutex_lock(&userspace_mutex);
+ kfree(policy->governor_data);
+ policy->governor_data = NULL;
+ mutex_unlock(&userspace_mutex);
+}
+
+static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ BUG_ON(!policy->cur);
+ pr_debug("started managing cpu %u\n", policy->cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 1;
+ *setspeed = policy->cur;
+ mutex_unlock(&userspace_mutex);
+ return 0;
+}
+
+static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ pr_debug("managing cpu %u stopped\n", policy->cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 0;
+ *setspeed = 0;
+ mutex_unlock(&userspace_mutex);
+}
+
+static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ mutex_lock(&userspace_mutex);
+
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
+
+ if (policy->max < *setspeed)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > *setspeed)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+
+ mutex_unlock(&userspace_mutex);
+}
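The limits callback effectively clamps the last user-requested speed into the new policy bounds: if the user last wrote 1800000 kHz and policy->max drops to 1500000 kHz, the governor retargets 1.5 GHz, then re-requests the stored 1.8 GHz once the limit is lifted again. The chosen frequency matches a plain clamp (only the relation flag differs per branch); a compact restatement, assuming the kernel's clamp_t() helper:

static unsigned int userspace_clamped_speed(struct cpufreq_policy *policy,
					    unsigned int setspeed)
{
	/* Same value the three __cpufreq_driver_target() branches pick. */
	return clamp_t(unsigned int, setspeed, policy->min, policy->max);
}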
+
+static struct cpufreq_governor cpufreq_gov_userspace = {
+ .name = "userspace",
+ .init = cpufreq_userspace_policy_init,
+ .exit = cpufreq_userspace_policy_exit,
+ .start = cpufreq_userspace_policy_start,
+ .stop = cpufreq_userspace_policy_stop,
+ .limits = cpufreq_userspace_policy_limits,
+ .store_setspeed = cpufreq_set,
+ .show_setspeed = show_speed,
+ .owner = THIS_MODULE,
+};
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
+ "Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_userspace;
+}
+#endif
+
+cpufreq_governor_init(cpufreq_gov_userspace);
+cpufreq_governor_exit(cpufreq_gov_userspace);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
new file mode 100644
index 000000000..ebb3a8102
--- /dev/null
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU frequency scaling for DaVinci
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Updated to support OMAP3
+ * Rajendra Nayak <rnayak@ti.com>
+ */
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_data/davinci-cpufreq.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+
+struct davinci_cpufreq {
+ struct device *dev;
+ struct clk *armclk;
+ struct clk *asyncclk;
+ unsigned long asyncrate;
+};
+static struct davinci_cpufreq cpufreq;
+
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
+{
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct clk *armclk = cpufreq.armclk;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
+
+ old_freq = policy->cur;
+ new_freq = pdata->freq_table[idx].frequency;
+
+ /* if moving to higher frequency, up the voltage beforehand */
+ if (pdata->set_voltage && new_freq > old_freq) {
+ ret = pdata->set_voltage(idx);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_set_rate(armclk, new_freq * 1000);
+ if (ret)
+ return ret;
+
+ if (cpufreq.asyncclk) {
+ ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
+ if (ret)
+ return ret;
+ }
+
+ /* if moving to lower freq, lower the voltage after lowering freq */
+ if (pdata->set_voltage && new_freq < old_freq)
+ pdata->set_voltage(idx);
+
+ return 0;
+}
+
+static int davinci_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /* Finish platform specific initialization */
+ if (pdata->init) {
+ result = pdata->init();
+ if (result)
+ return result;
+ }
+
+ policy->clk = cpufreq.armclk;
+
+ /*
+ * Time measurement across the target() function yields ~1500-1800 us
+ * with no drivers on the notification list.
+ * Set the latency to 2000 us to accommodate the addition of drivers
+ * to the pre/post change notification list.
+ */
+ cpufreq_generic_init(policy, freq_table, 2000 * 1000);
+ return 0;
+}
+
+static struct cpufreq_driver davinci_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = davinci_target,
+ .get = cpufreq_generic_get,
+ .init = davinci_cpu_init,
+ .name = "davinci",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+{
+ struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
+ struct clk *asyncclk;
+
+ if (!pdata)
+ return -EINVAL;
+ if (!pdata->freq_table)
+ return -EINVAL;
+
+ cpufreq.dev = &pdev->dev;
+
+ cpufreq.armclk = clk_get(NULL, "arm");
+ if (IS_ERR(cpufreq.armclk)) {
+ dev_err(cpufreq.dev, "Unable to get ARM clock\n");
+ return PTR_ERR(cpufreq.armclk);
+ }
+
+ asyncclk = clk_get(cpufreq.dev, "async");
+ if (!IS_ERR(asyncclk)) {
+ cpufreq.asyncclk = asyncclk;
+ cpufreq.asyncrate = clk_get_rate(asyncclk);
+ }
+
+ return cpufreq_register_driver(&davinci_driver);
+}
+
+static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&davinci_driver);
+
+ clk_put(cpufreq.armclk);
+
+ if (cpufreq.asyncclk)
+ clk_put(cpufreq.asyncclk);
+
+ return 0;
+}
+
+static struct platform_driver davinci_cpufreq_driver = {
+ .driver = {
+ .name = "cpufreq-davinci",
+ },
+ .remove = __exit_p(davinci_cpufreq_remove),
+};
+
+int __init davinci_cpufreq_init(void)
+{
+ return platform_driver_probe(&davinci_cpufreq_driver,
+ davinci_cpufreq_probe);
+}
+
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
new file mode 100644
index 000000000..ab93bce8a
--- /dev/null
+++ b/drivers/cpufreq/e_powersaver.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on documentation provided by Dave Jones. Thanks!
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
+#define EPS_BRAND_C7M 0
+#define EPS_BRAND_C7 1
+#define EPS_BRAND_EDEN 2
+#define EPS_BRAND_C3 3
+#define EPS_BRAND_C7D 4
+
+struct eps_cpu_data {
+ u32 fsb;
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+ u32 bios_limit;
+#endif
+ struct cpufreq_frequency_table freq_table[];
+};
+
+static struct eps_cpu_data *eps_cpu[NR_CPUS];
+
+/* Module parameters */
+static int freq_failsafe_off;
+static int voltage_failsafe_off;
+static int set_max_voltage;
+
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+static int ignore_acpi_limit;
+
+static struct acpi_processor_performance *eps_acpi_cpu_perf;
+
+/* Minimum necessary to get acpi_processor_get_bios_limit() working */
+static int eps_acpi_init(void)
+{
+ eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
+ GFP_KERNEL);
+ if (!eps_acpi_cpu_perf)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&eps_acpi_cpu_perf->shared_cpu_map,
+ GFP_KERNEL)) {
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -ENOMEM;
+ }
+
+ if (acpi_processor_register_performance(eps_acpi_cpu_perf, 0)) {
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -EIO;
+ }
+ return 0;
+}
+
+static int eps_acpi_exit(struct cpufreq_policy *policy)
+{
+ if (eps_acpi_cpu_perf) {
+ acpi_processor_unregister_performance(0);
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ }
+ return 0;
+}
+#endif
+
+static unsigned int eps_get(unsigned int cpu)
+{
+ struct eps_cpu_data *centaur;
+ u32 lo, hi;
+
+ if (cpu)
+ return 0;
+ centaur = eps_cpu[cpu];
+ if (centaur == NULL)
+ return 0;
+
+ /* Return current frequency */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ return centaur->fsb * ((lo >> 8) & 0xff);
+}
+
+static int eps_set_state(struct eps_cpu_data *centaur,
+ struct cpufreq_policy *policy,
+ u32 dest_state)
+{
+ u32 lo, hi;
+ int i;
+
+ /* Wait while CPU is busy */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i = 0;
+ while (lo & ((1 << 16) | (1 << 17))) {
+ udelay(16);
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i++;
+ if (unlikely(i > 64)) {
+ return -ENODEV;
+ }
+ }
+ /* Set new multiplier and voltage */
+ wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
+ /* Wait until transition end */
+ i = 0;
+ do {
+ udelay(16);
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i++;
+ if (unlikely(i > 64)) {
+ return -ENODEV;
+ }
+ } while (lo & ((1 << 16) | (1 << 17)));
+
+#ifdef DEBUG
+ {
+ u8 current_multiplier, current_voltage;
+
+ /* Print voltage and multiplier */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ current_voltage = lo & 0xff;
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
+ current_multiplier = (lo >> 8) & 0xff;
+ pr_info("Current multiplier = %d\n", current_multiplier);
+ }
+#endif
+ return 0;
+}
+
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct eps_cpu_data *centaur;
+ unsigned int cpu = policy->cpu;
+ unsigned int dest_state;
+ int ret;
+
+ if (unlikely(eps_cpu[cpu] == NULL))
+ return -ENODEV;
+ centaur = eps_cpu[cpu];
+
+ /* Make frequency transition */
+ dest_state = centaur->freq_table[index].driver_data & 0xffff;
+ ret = eps_set_state(centaur, policy, dest_state);
+ if (ret)
+ pr_err("Timeout!\n");
+ return ret;
+}
+
+static int eps_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ u32 lo, hi;
+ u64 val;
+ u8 current_multiplier, current_voltage;
+ u8 max_multiplier, max_voltage;
+ u8 min_multiplier, min_voltage;
+ u8 brand = 0;
+ u32 fsb;
+ struct eps_cpu_data *centaur;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ struct cpufreq_frequency_table *f_table;
+ int k, step, voltage;
+ int states;
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+ unsigned int limit;
+#endif
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* Check brand */
+ pr_info("Detected VIA ");
+
+ switch (c->x86_model) {
+ case 10:
+ rdmsr(0x1153, lo, hi);
+ brand = (((lo >> 2) ^ lo) >> 18) & 3;
+ pr_cont("Model A ");
+ break;
+ case 13:
+ rdmsr(0x1154, lo, hi);
+ brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
+ pr_cont("Model D ");
+ break;
+ }
+
+ switch (brand) {
+ case EPS_BRAND_C7M:
+ pr_cont("C7-M\n");
+ break;
+ case EPS_BRAND_C7:
+ pr_cont("C7\n");
+ break;
+ case EPS_BRAND_EDEN:
+ pr_cont("Eden\n");
+ break;
+ case EPS_BRAND_C7D:
+ pr_cont("C7-D\n");
+ break;
+ case EPS_BRAND_C3:
+ pr_cont("C3\n");
+ return -ENODEV;
+ }
+ /* Enable Enhanced PowerSaver */
+ rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+ wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ /* Can be locked at 0 */
+ rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ pr_info("Can't enable Enhanced PowerSaver\n");
+ return -ENODEV;
+ }
+ }
+
+ /* Print voltage and multiplier */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ current_voltage = lo & 0xff;
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
+ current_multiplier = (lo >> 8) & 0xff;
+ pr_info("Current multiplier = %d\n", current_multiplier);
+
+ /* Print limits */
+ max_voltage = hi & 0xff;
+ pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
+ max_multiplier = (hi >> 8) & 0xff;
+ pr_info("Highest multiplier = %d\n", max_multiplier);
+ min_voltage = (hi >> 16) & 0xff;
+ pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
+ min_multiplier = (hi >> 24) & 0xff;
+ pr_info("Lowest multiplier = %d\n", min_multiplier);
+
+ /* Sanity checks */
+ if (current_multiplier == 0 || max_multiplier == 0
+ || min_multiplier == 0)
+ return -EINVAL;
+ if (current_multiplier > max_multiplier
+ || max_multiplier <= min_multiplier)
+ return -EINVAL;
+ if (current_voltage > 0x1f || max_voltage > 0x1f)
+ return -EINVAL;
+ if (max_voltage < min_voltage
+ || current_voltage < min_voltage
+ || current_voltage > max_voltage)
+ return -EINVAL;
+
+ /* Check for systems using underclocked CPU */
+ if (!freq_failsafe_off && max_multiplier != current_multiplier) {
+ pr_info("Your processor is running at different frequency then its maximum. Aborting.\n");
+ pr_info("You can use freq_failsafe_off option to disable this check.\n");
+ return -EINVAL;
+ }
+ if (!voltage_failsafe_off && max_voltage != current_voltage) {
+ pr_info("Your processor is running at different voltage then its maximum. Aborting.\n");
+ pr_info("You can use voltage_failsafe_off option to disable this check.\n");
+ return -EINVAL;
+ }
+
+ /* Calc FSB speed */
+ fsb = cpu_khz / current_multiplier;
+
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+ /* Check for ACPI processor speed limit */
+ if (!ignore_acpi_limit && !eps_acpi_init()) {
+ if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
+ pr_info("ACPI limit %u.%uGHz\n",
+ limit/1000000,
+ (limit%1000000)/10000);
+ eps_acpi_exit(policy);
+ /* Check if max_multiplier is in BIOS limits */
+ if (limit && max_multiplier * fsb > limit) {
+ pr_info("Aborting\n");
+ return -EINVAL;
+ }
+ }
+ }
+#endif
+
+ /* Allow the user to set a lower maximum voltage than the one reported
+ * by the processor */
+ if (brand == EPS_BRAND_C7M && set_max_voltage) {
+ u32 v;
+
+ /* Change mV to something hardware can use */
+ v = (set_max_voltage - 700) / 16;
+ /* Check if voltage is within limits */
+ if (v >= min_voltage && v <= max_voltage) {
+ pr_info("Setting %dmV as maximum\n", v * 16 + 700);
+ max_voltage = v;
+ }
+ }
+
+ /* Calc number of p-states supported */
+ if (brand == EPS_BRAND_C7M)
+ states = max_multiplier - min_multiplier + 1;
+ else
+ states = 2;
+
+ /* Allocate private data and frequency table for current cpu */
+ centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
+ GFP_KERNEL);
+ if (!centaur)
+ return -ENOMEM;
+ eps_cpu[0] = centaur;
+
+ /* Copy basic values */
+ centaur->fsb = fsb;
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+ centaur->bios_limit = limit;
+#endif
+
+ /* Fill frequency and MSR value table */
+ f_table = &centaur->freq_table[0];
+ if (brand != EPS_BRAND_C7M) {
+ f_table[0].frequency = fsb * min_multiplier;
+ f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
+ f_table[1].frequency = fsb * max_multiplier;
+ f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
+ f_table[2].frequency = CPUFREQ_TABLE_END;
+ } else {
+ k = 0;
+ step = ((max_voltage - min_voltage) * 256)
+ / (max_multiplier - min_multiplier);
+ for (i = min_multiplier; i <= max_multiplier; i++) {
+ voltage = (k * step) / 256 + min_voltage;
+ f_table[k].frequency = fsb * i;
+ f_table[k].driver_data = (i << 8) | voltage;
+ k++;
+ }
+ f_table[k].frequency = CPUFREQ_TABLE_END;
+ }
+
+ policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
+ policy->freq_table = &centaur->freq_table[0];
+
+ return 0;
+}
+
+static int eps_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+
+ /* Bye */
+ kfree(eps_cpu[cpu]);
+ eps_cpu[cpu] = NULL;
+ return 0;
+}
+
+static struct cpufreq_driver eps_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = eps_target,
+ .init = eps_cpu_init,
+ .exit = eps_cpu_exit,
+ .get = eps_get,
+ .name = "e_powersaver",
+ .attr = cpufreq_generic_attr,
+};
+
+
+/* This driver will work only on Centaur C7 processors with
+ * Enhanced SpeedStep/PowerSaver registers */
+static const struct x86_cpu_id eps_cpu_id[] = {
+ X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_EST, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id);
+
+static int __init eps_init(void)
+{
+ if (!x86_match_cpu(eps_cpu_id) || boot_cpu_data.x86_model < 10)
+ return -ENODEV;
+ if (cpufreq_register_driver(&eps_driver))
+ return -EINVAL;
+ return 0;
+}
+
+static void __exit eps_exit(void)
+{
+ cpufreq_unregister_driver(&eps_driver);
+}
+
+/* Allow the user to overclock the machine or to switch to a higher frequency
+ * after unloading the module */
+module_param(freq_failsafe_off, int, 0644);
+MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
+module_param(voltage_failsafe_off, int, 0644);
+MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
+module_param(ignore_acpi_limit, int, 0644);
+MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
+#endif
+module_param(set_max_voltage, int, 0644);
+MODULE_PARM_DESC(set_max_voltage, "Set maximum CPU voltage (mV) C7-M only");
+
+MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
+MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
+MODULE_LICENSE("GPL");
+
+module_init(eps_init);
+module_exit(eps_exit);
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
new file mode 100644
index 000000000..4ce5eb35d
--- /dev/null
+++ b/drivers/cpufreq/elanfreq.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * elanfreq: cpufreq driver for the AMD ELAN family
+ *
+ * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
+ *
+ * Parts of this code are (c) Sven Geggus <sven@geggus.net>
+ *
+ * All Rights Reserved.
+ *
+ * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
+#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
+
+/* Module parameter */
+static int max_freq;
+
+struct s_elan_multiplier {
+ int clock; /* frequency in kHz */
+ int val40h; /* PMU Force Mode register */
+ int val80h; /* CPU Clock Speed Register */
+};
+
+/*
+ * It is important that the frequencies
+ * are listed in ascending order here!
+ */
+static struct s_elan_multiplier elan_multiplier[] = {
+ {1000, 0x02, 0x18},
+ {2000, 0x02, 0x10},
+ {4000, 0x02, 0x08},
+ {8000, 0x00, 0x00},
+ {16000, 0x00, 0x02},
+ {33000, 0x00, 0x04},
+ {66000, 0x01, 0x04},
+ {99000, 0x01, 0x05}
+};
+
+static struct cpufreq_frequency_table elanfreq_table[] = {
+ {0, 0, 1000},
+ {0, 1, 2000},
+ {0, 2, 4000},
+ {0, 3, 8000},
+ {0, 4, 16000},
+ {0, 5, 33000},
+ {0, 6, 66000},
+ {0, 7, 99000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+
+/**
+ * elanfreq_get_cpu_frequency: determine current cpu speed
+ *
+ * Finds out at which frequency the CPU of the Elan SOC runs
+ * at the moment. Frequencies from 1 to 33 MHz are generated
+ * the normal way; 66 and 99 MHz are called "Hyperspeed Mode"
+ * and have the rest of the chip running at 33 MHz.
+ */
+
+static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
+{
+ u8 clockspeed_reg; /* Clock Speed Register */
+
+ local_irq_disable();
+ outb_p(0x80, REG_CSCIR);
+ clockspeed_reg = inb_p(REG_CSCDR);
+ local_irq_enable();
+
+ if ((clockspeed_reg & 0xE0) == 0xE0)
+ return 0;
+
+ /* Are we in CPU clock multiplied mode (66/99 MHz)? */
+ if ((clockspeed_reg & 0xE0) == 0xC0) {
+ if ((clockspeed_reg & 0x01) == 0)
+ return 66000;
+ else
+ return 99000;
+ }
+
+ /* 33 MHz is not 32 MHz... */
+ if ((clockspeed_reg & 0xE0) == 0xA0)
+ return 33000;
+
+ return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
+}
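Decoded, bits 7..5 of the Clock Speed Register select the multiplier and bit 0 distinguishes 66 from 99 MHz in hyperspeed mode. A few sample values, derived from the branches above (illustrative):

/*
 *   0x00 -> (1 << 0) * 1000 =  1000 kHz
 *   0x20 -> (1 << 1) * 1000 =  2000 kHz
 *   0x40 -> (1 << 2) * 1000 =  4000 kHz
 *   0x60 -> (1 << 3) * 1000 =  8000 kHz
 *   0x80 -> (1 << 4) * 1000 = 16000 kHz
 *   0xA0 -> special-cased      33000 kHz
 *   0xC0 -> hyperspeed, bit 0 clear -> 66000 kHz
 *   0xC1 -> hyperspeed, bit 0 set   -> 99000 kHz
 *   0xE0 -> invalid, reported as 0
 */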
+
+
+static int elanfreq_target(struct cpufreq_policy *policy,
+ unsigned int state)
+{
+ /*
+ * Access to the Elan's internal registers is indexed via
+ * 0x22: Chip Setup & Control Register Index Register (CSCI)
+ * 0x23: Chip Setup & Control Register Data Register (CSCD)
+ *
+ */
+
+ /*
+ * 0x40 is the Power Management Unit's Force Mode Register.
+ * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
+ */
+
+ local_irq_disable();
+ outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */
+ outb_p(0x00, REG_CSCDR);
+ local_irq_enable(); /* wait till internal pipelines and */
+ udelay(1000); /* buffers have cleaned up */
+
+ local_irq_disable();
+
+ /* now, set the CPU clock speed register (0x80) */
+ outb_p(0x80, REG_CSCIR);
+ outb_p(elan_multiplier[state].val80h, REG_CSCDR);
+
+ /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
+ outb_p(0x40, REG_CSCIR);
+ outb_p(elan_multiplier[state].val40h, REG_CSCDR);
+ udelay(10000);
+ local_irq_enable();
+
+ return 0;
+}
+/*
+ * Module init and exit code
+ */
+
+static int elanfreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ struct cpufreq_frequency_table *pos;
+
+ /* capability check */
+ if ((c->x86_vendor != X86_VENDOR_AMD) ||
+ (c->x86 != 4) || (c->x86_model != 10))
+ return -ENODEV;
+
+ /* max freq */
+ if (!max_freq)
+ max_freq = elanfreq_get_cpu_frequency(0);
+
+ /* table init */
+ cpufreq_for_each_entry(pos, elanfreq_table)
+ if (pos->frequency > max_freq)
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+
+ policy->freq_table = elanfreq_table;
+ return 0;
+}
+
+
+#ifndef MODULE
+/**
+ * elanfreq_setup - elanfreq command line parameter parsing
+ *
+ * elanfreq command line parameter. Use:
+ * elanfreq=66000
+ * to set the maximum CPU frequency to 66 MHz. Note that in
+ * case you do not give this boot parameter, the maximum
+ * frequency will fall back to the _current_ CPU frequency, which
+ * might be lower. If you build this as a module, use the
+ * max_freq module parameter instead.
+ */
+static int __init elanfreq_setup(char *str)
+{
+ max_freq = simple_strtoul(str, &str, 0);
+ pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+ return 1;
+}
+__setup("elanfreq=", elanfreq_setup);
+#endif
+
+
+static struct cpufreq_driver elanfreq_driver = {
+ .get = elanfreq_get_cpu_frequency,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = elanfreq_target,
+ .init = elanfreq_cpu_init,
+ .name = "elanfreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id elan_id[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 10, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, elan_id);
+
+static int __init elanfreq_init(void)
+{
+ if (!x86_match_cpu(elan_id))
+ return -ENODEV;
+ return cpufreq_register_driver(&elanfreq_driver);
+}
+
+
+static void __exit elanfreq_exit(void)
+{
+ cpufreq_unregister_driver(&elanfreq_driver);
+}
+
+
+module_param(max_freq, int, 0444);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, "
+ "Sven Geggus <sven@geggus.net>");
+MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
+
+module_init(elanfreq_init);
+module_exit(elanfreq_exit);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
new file mode 100644
index 000000000..67e56cf63
--- /dev/null
+++ b/drivers/cpufreq/freq_table.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/drivers/cpufreq/freq_table.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+
+/*********************************************************************
+ * FREQUENCY TABLE HELPERS *
+ *********************************************************************/
+
+bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+ if (!table)
+ return false;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ if (pos->flags & CPUFREQ_BOOST_FREQ)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(policy_has_boost_freq);
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int min_freq = ~0;
+ unsigned int max_freq = 0;
+ unsigned int freq;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ if (!cpufreq_boost_enabled()
+ && (pos->flags & CPUFREQ_BOOST_FREQ))
+ continue;
+
+ pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ if (freq < min_freq)
+ min_freq = freq;
+ if (freq > max_freq)
+ max_freq = freq;
+ }
+
+ policy->min = policy->cpuinfo.min_freq = min_freq;
+ policy->max = max_freq;
+ /*
+ * If the driver has set its own cpuinfo.max_freq above max_freq, leave
+ * it as is.
+ */
+ if (policy->cpuinfo.max_freq < max_freq)
+ policy->max = policy->cpuinfo.max_freq = max_freq;
+
+ if (policy->min == ~0)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
+ struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int freq, next_larger = ~0;
+ bool found = false;
+
+ pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ if ((freq >= policy->min) && (freq <= policy->max)) {
+ found = true;
+ break;
+ }
+
+ if ((next_larger > freq) && (freq > policy->max))
+ next_larger = freq;
+ }
+
+ if (!found) {
+ policy->max = next_larger;
+ cpufreq_verify_within_cpu_limits(policy);
+ }
+
+ pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
+
+/*
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
+{
+ if (!policy->freq_table)
+ return -ENODEV;
+
+ return cpufreq_frequency_table_verify(policy, policy->freq_table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
+
+int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table optimal = {
+ .driver_data = ~0,
+ .frequency = 0,
+ };
+ struct cpufreq_frequency_table suboptimal = {
+ .driver_data = ~0,
+ .frequency = 0,
+ };
+ struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int freq, diff, i = 0;
+ int index;
+
+ pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
+ target_freq, relation, policy->cpu);
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ suboptimal.frequency = ~0;
+ break;
+ case CPUFREQ_RELATION_L:
+ case CPUFREQ_RELATION_C:
+ optimal.frequency = ~0;
+ break;
+ }
+
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
+ freq = pos->frequency;
+
+ if ((freq < policy->min) || (freq > policy->max))
+ continue;
+ if (freq == target_freq) {
+ optimal.driver_data = i;
+ break;
+ }
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ if (freq < target_freq) {
+ if (freq >= optimal.frequency) {
+ optimal.frequency = freq;
+ optimal.driver_data = i;
+ }
+ } else {
+ if (freq <= suboptimal.frequency) {
+ suboptimal.frequency = freq;
+ suboptimal.driver_data = i;
+ }
+ }
+ break;
+ case CPUFREQ_RELATION_L:
+ if (freq > target_freq) {
+ if (freq <= optimal.frequency) {
+ optimal.frequency = freq;
+ optimal.driver_data = i;
+ }
+ } else {
+ if (freq >= suboptimal.frequency) {
+ suboptimal.frequency = freq;
+ suboptimal.driver_data = i;
+ }
+ }
+ break;
+ case CPUFREQ_RELATION_C:
+ diff = abs(freq - target_freq);
+ if (diff < optimal.frequency ||
+ (diff == optimal.frequency &&
+ freq > table[optimal.driver_data].frequency)) {
+ optimal.frequency = diff;
+ optimal.driver_data = i;
+ }
+ break;
+ }
+ }
+ if (optimal.driver_data > i) {
+ if (suboptimal.driver_data > i) {
+ WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ return 0;
+ }
+
+ index = suboptimal.driver_data;
+ } else
+ index = optimal.driver_data;
+
+ pr_debug("target index is %u, freq is:%u kHz\n", index,
+ table[index].frequency);
+ return index;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted);
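For an unsorted table the three relations pick different entries around the target. A worked example (illustrative): with valid frequencies of 500000, 1000000 and 1500000 kHz and a target of 1200000 kHz,

/*
 *   CPUFREQ_RELATION_L -> 1500000 kHz (lowest frequency at or above the target)
 *   CPUFREQ_RELATION_H -> 1000000 kHz (highest frequency at or below the target)
 *   CPUFREQ_RELATION_C -> 1000000 kHz (closest; on a tie the higher one wins)
 *
 * e.g. idx = cpufreq_table_index_unsorted(policy, 1200000, CPUFREQ_RELATION_L);
 */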
+
+int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ int idx;
+
+ if (unlikely(!table)) {
+ pr_debug("%s: Unable to find frequency table\n", __func__);
+ return -ENOENT;
+ }
+
+ cpufreq_for_each_valid_entry_idx(pos, table, idx)
+ if (pos->frequency == freq)
+ return idx;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
+
+/*
+ * show_available_freqs - show available frequencies for the specified CPU
+ */
+static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
+ bool show_boost)
+{
+ ssize_t count = 0;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+ if (!table)
+ return -ENODEV;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ /*
+ * show_boost = true and the entry is a BOOST freq
+ * display BOOST freqs
+ *
+ * show_boost = false and the entry is a BOOST freq
+ * show_boost = true and the entry is not a BOOST freq
+ * continue - do not display anything
+ *
+ * show_boost = false and the entry is not a BOOST freq
+ * display NON BOOST freqs
+ */
+ if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
+ continue;
+
+ count += sprintf(&buf[count], "%d ", pos->frequency);
+ }
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+
+}
+
+#define cpufreq_attr_available_freq(_name) \
+struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
+__ATTR_RO(_name##_frequencies)
+
+/*
+ * scaling_available_frequencies_show - show available normal frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, false);
+}
+cpufreq_attr_available_freq(scaling_available);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+
+/*
+ * scaling_boost_frequencies_show - show available boost frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, true);
+}
+cpufreq_attr_available_freq(scaling_boost);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
+
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
+static int set_freq_table_sorted(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ struct cpufreq_frequency_table *prev = NULL;
+ int ascending = 0;
+
+ policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (!prev) {
+ prev = pos;
+ continue;
+ }
+
+ if (pos->frequency == prev->frequency) {
+ pr_warn("Duplicate freq-table entries: %u\n",
+ pos->frequency);
+ return -EINVAL;
+ }
+
+ /* Frequency increased from prev to pos */
+ if (pos->frequency > prev->frequency) {
+ /* But frequency was decreasing earlier */
+ if (ascending < 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending++;
+ } else {
+ /* Frequency decreased from prev to pos */
+
+ /* But frequency was increasing earlier */
+ if (ascending > 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending--;
+ }
+
+ prev = pos;
+ }
+
+ if (ascending > 0)
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING;
+ else
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING;
+
+ pr_debug("Freq table is sorted in %s order\n",
+ ascending > 0 ? "ascending" : "descending");
+
+ return 0;
+}
+
+int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (!policy->freq_table)
+ return 0;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ if (ret)
+ return ret;
+
+ return set_freq_table_sorted(policy);
+}
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq frequency table helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
new file mode 100644
index 000000000..75b3ef7ec
--- /dev/null
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cyrix MediaGX and NatSemi Geode Suspend Modulation
+ * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * (C) 2002 Hiroshi Miura <miura@da-cha.org>
+ * All Rights Reserved
+ *
+ * The author(s) of this software shall not be held liable for damages
+ * of any nature resulting due to the use of this software. This
+ * software is provided AS-IS with no warranties.
+ *
+ * Theoretical note:
+ *
+ * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
+ *
+ * CPU frequency control on the NatSemi Geode GX1/GXLV processor and CS55x0
+ * is based on Suspend Modulation.
+ *
+ * Suspend Modulation works by asserting and de-asserting the SUSP# pin
+ * to the CPU (GX1/GXLV) for configurable durations. While SUSP# is
+ * asserted the CPU enters an idle state: the GX1 stops its core clock,
+ * so power consumption is reduced.
+ *
+ * Suspend Modulation's OFF/ON duration are configurable
+ * with 'Suspend Modulation OFF Count Register'
+ * and 'Suspend Modulation ON Count Register'.
+ * These registers are 8-bit counters that represent the number of
+ * 32 us intervals during which the SUSP# pin is asserted (ON) or
+ * de-asserted (OFF) to the processor.
+ *
+ * These counters define a ratio which is the effective frequency
+ * of operation of the system.
+ *
+ * F_eff = Fgx * OFF Count / (OFF Count + ON Count)
+ *
+ * 0 <= On Count, Off Count <= 255
+ *
+ * From these limits, we can get register values
+ *
+ * off_duration + on_duration <= MAX_DURATION
+ * on_duration = off_duration * (stock_freq - freq) / freq
+ *
+ * off_duration = (freq * DURATION) / stock_freq
+ * on_duration = DURATION - off_duration
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
+ * - fix on/off register mistake
+ * - fix cpu_khz calc when it stops cpu modulation.
+ *
+ * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
+ * - rewrite for Cyrix MediaGX Cx5510/5520 and
+ * NatSemi Geode Cs5530(A).
+ *
+ * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * - cs5530_mod patch for 2.4.19-rc1.
+ *
+ *---------------------------------------------------------------------------
+ *
+ * Todo
+ * Test on machines with 5510, 5530, 5530A
+ */
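Plugging numbers into the relations above: a 200 MHz GX1 (stock_freq = 200000 kHz) asked for 150000 kHz with DURATION = 255 gives off_duration = 191, on_duration = 64 and an effective frequency of about 149803 kHz. A small sketch of that arithmetic (illustrative, the function name is hypothetical):

static unsigned int gx_example_eff_freq(void)
{
	unsigned int stock_freq = 200000, khz = 150000, duration = 255;
	unsigned int off = (khz * duration) / stock_freq;	/* 191 */
	unsigned int on = duration - off;			/* 64 */

	return (stock_freq * off) / (off + on);			/* ~149803 kHz */
}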
+
+/************************************************************************
+ * Suspend Modulation - Definitions *
+ ************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/processor-cyrix.h>
+
+/* PCI config registers, all at F0 */
+#define PCI_PMER1 0x80 /* power management enable register 1 */
+#define PCI_PMER2 0x81 /* power management enable register 2 */
+#define PCI_PMER3 0x82 /* power management enable register 3 */
+#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
+#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
+#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
+#define PCI_MODON 0x95 /* suspend modulation ON counter register */
+#define PCI_SUSCFG 0x96 /* suspend configuration register */
+
+/* PMER1 bits */
+#define GPM (1<<0) /* global power management */
+#define GIT (1<<1) /* globally enable PM device idle timers */
+#define GTR (1<<2) /* globally enable IO traps */
+#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
+#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
+
+/* SUSCFG bits */
+#define SUSMOD (1<<0) /* enable/disable suspend modulation */
+/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
+#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
+ /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
+#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
+/* the below is supported only with cs5530A */
+#define PWRSVE_ISA (1<<3) /* stop ISA clock */
+#define PWRSVE (1<<4) /* active idle */
+
+struct gxfreq_params {
+ u8 on_duration;
+ u8 off_duration;
+ u8 pci_suscfg;
+ u8 pci_pmer1;
+ u8 pci_pmer2;
+ struct pci_dev *cs55x0;
+};
+
+static struct gxfreq_params *gx_params;
+static int stock_freq;
+
+/* PCI bus clock, in kHz - defaults to 30000 (30 MHz) if cpu_khz is not available */
+static int pci_busclk;
+module_param(pci_busclk, int, 0444);
+
+/* maximum duration for which the cpu may be suspended
+ * (32us * MAX_DURATION). If no parameter is given, this defaults
+ * to 255.
+ * Note that this leads to a maximum of 8 ms(!) where the CPU clock
+ * is suspended -- processing power is just 0.39% of what it used to be,
+ * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
+static int max_duration = 255;
+module_param(max_duration, int, 0444);
+
+/* For the default policy, we want at least some processing power
+ * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
+ */
+#define POLICY_MIN_DIV 20
+
+
+/**
+ * we can detect a core multiplier from dir0_lsb
+ * from GX1 datasheet p.56,
+ * MULT[3:0]:
+ * 0000 = SYSCLK multiplied by 4 (test only)
+ * 0001 = SYSCLK multiplied by 10
+ * 0010 = SYSCLK multiplied by 4
+ * 0011 = SYSCLK multiplied by 6
+ * 0100 = SYSCLK multiplied by 9
+ * 0101 = SYSCLK multiplied by 5
+ * 0110 = SYSCLK multiplied by 7
+ * 0111 = SYSCLK multiplied by 8
+ * of 33.3MHz
+ **/
+static int gx_freq_mult[16] = {
+ 4, 10, 4, 6, 9, 5, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+/****************************************************************
+ * Low Level chipset interface *
+ ****************************************************************/
+static struct pci_device_id gx_chipset_tbl[] __initdata = {
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, gx_chipset_tbl);
+
+static void gx_write_byte(int reg, int value)
+{
+ pci_write_config_byte(gx_params->cs55x0, reg, value);
+}
+
+/**
+ * gx_detect_chipset:
+ *
+ **/
+static struct pci_dev * __init gx_detect_chipset(void)
+{
+ struct pci_dev *gx_pci = NULL;
+
+ /* detect which companion chip is used */
+ for_each_pci_dev(gx_pci) {
+ if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
+ return gx_pci;
+ }
+
+ pr_debug("error: no supported chipset found!\n");
+ return NULL;
+}
+
+/**
+ * gx_get_cpuspeed:
+ *
+ * Finds out at which effective frequency the Cyrix MediaGX/NatSemi
+ * Geode CPU runs.
+ */
+static unsigned int gx_get_cpuspeed(unsigned int cpu)
+{
+ if ((gx_params->pci_suscfg & SUSMOD) == 0)
+ return stock_freq;
+
+ return (stock_freq * gx_params->off_duration)
+ / (gx_params->on_duration + gx_params->off_duration);
+}
+
+/**
+ * gx_validate_speed:
+ * determine the closest achievable cpu speed and the matching on/off durations
+ *
+ **/
+
+static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
+ u8 *off_duration)
+{
+ unsigned int i;
+ u8 tmp_on, tmp_off;
+ int old_tmp_freq = stock_freq;
+ int tmp_freq;
+
+ *off_duration = 1;
+ *on_duration = 0;
+
+ for (i = max_duration; i > 0; i--) {
+ tmp_off = ((khz * i) / stock_freq) & 0xff;
+ tmp_on = i - tmp_off;
+ tmp_freq = (stock_freq * tmp_off) / i;
+ /* if this relation is closer to khz, use this. If it's equal,
+ * prefer it, too - lower latency */
+ if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
+ *on_duration = tmp_on;
+ *off_duration = tmp_off;
+ old_tmp_freq = tmp_freq;
+ }
+ }
+
+ return old_tmp_freq;
+}
+
+
+/**
+ * gx_set_cpuspeed:
+ * set cpu speed in khz.
+ **/
+
+static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
+{
+ u8 suscfg, pmer1;
+ unsigned int new_khz;
+ unsigned long flags;
+ struct cpufreq_freqs freqs;
+
+ freqs.old = gx_get_cpuspeed(0);
+
+ new_khz = gx_validate_speed(khz, &gx_params->on_duration,
+ &gx_params->off_duration);
+
+ freqs.new = new_khz;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ local_irq_save(flags);
+
+ if (new_khz != stock_freq) {
+ /* if new khz == 100% of CPU speed, it is special case */
+ switch (gx_params->cs55x0->device) {
+ case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+ pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
+ /* FIXME: need to test other values -- Zwane,Miura */
+ /* typical 2 to 4ms */
+ gx_write_byte(PCI_IRQTC, 4);
+ /* typical 50 to 100ms */
+ gx_write_byte(PCI_VIDTC, 100);
+ gx_write_byte(PCI_PMER1, pmer1);
+
+ if (gx_params->cs55x0->revision < 0x10) {
+ /* CS5530(rev 1.2, 1.3) */
+ suscfg = gx_params->pci_suscfg|SUSMOD;
+ } else {
+ /* CS5530A,B.. */
+ suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
+ }
+ break;
+ case PCI_DEVICE_ID_CYRIX_5520:
+ case PCI_DEVICE_ID_CYRIX_5510:
+ suscfg = gx_params->pci_suscfg | SUSMOD;
+ break;
+ default:
+ local_irq_restore(flags);
+ pr_debug("fatal: try to set unknown chipset.\n");
+ return;
+ }
+ } else {
+ suscfg = gx_params->pci_suscfg & ~(SUSMOD);
+ gx_params->off_duration = 0;
+ gx_params->on_duration = 0;
+ pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n");
+ }
+
+ gx_write_byte(PCI_MODOFF, gx_params->off_duration);
+ gx_write_byte(PCI_MODON, gx_params->on_duration);
+
+ gx_write_byte(PCI_SUSCFG, suscfg);
+ pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
+
+ local_irq_restore(flags);
+
+ gx_params->pci_suscfg = suscfg;
+
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
+ gx_params->on_duration * 32, gx_params->off_duration * 32);
+ pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
+}
+
+/****************************************************************
+ * High level functions *
+ ****************************************************************/
+
+/*
+ * cpufreq_gx_verify: test if frequency range is valid
+ *
+ * This function checks if a given frequency range in kHz is valid
+ * for the hardware supported by the driver.
+ */
+
+static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
+{
+ unsigned int tmp_freq = 0;
+ u8 tmp1, tmp2;
+
+ if (!stock_freq || !policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+ cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
+ stock_freq);
+
+ /* it needs to be assured that at least one supported frequency is
+ * within policy->min and policy->max. If it is not, policy->max
+ * needs to be increased until one frequency is supported.
+ * policy->min may not be decreased, though. This way we guarantee a
+ * specific processing capacity.
+ */
+ tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
+ if (tmp_freq < policy->min)
+ tmp_freq += stock_freq / max_duration;
+ policy->min = tmp_freq;
+ if (policy->min > policy->max)
+ policy->max = tmp_freq;
+ tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
+ if (tmp_freq > policy->max)
+ tmp_freq -= stock_freq / max_duration;
+ policy->max = tmp_freq;
+ if (policy->max < policy->min)
+ policy->max = policy->min;
+ cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
+ stock_freq);
+
+ return 0;
+}
+
+/*
+ * cpufreq_gx_target:
+ *
+ */
+static int cpufreq_gx_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ u8 tmp1, tmp2;
+ unsigned int tmp_freq;
+
+ if (!stock_freq || !policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+
+ tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
+ while (tmp_freq < policy->min) {
+ tmp_freq += stock_freq / max_duration;
+ tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
+ }
+ while (tmp_freq > policy->max) {
+ tmp_freq -= stock_freq / max_duration;
+ tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
+ }
+
+ gx_set_cpuspeed(policy, tmp_freq);
+
+ return 0;
+}
+
+static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int maxfreq;
+
+ if (!policy || policy->cpu != 0)
+ return -ENODEV;
+
+ /* determine maximum frequency */
+ if (pci_busclk)
+ maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
+ else if (cpu_khz)
+ maxfreq = cpu_khz;
+ else
+ maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
+
+ stock_freq = maxfreq;
+
+ pr_debug("cpu max frequency is %d.\n", maxfreq);
+
+ /* setup basic struct for cpufreq API */
+ policy->cpu = 0;
+
+ if (max_duration < POLICY_MIN_DIV)
+ policy->min = maxfreq / max_duration;
+ else
+ policy->min = maxfreq / POLICY_MIN_DIV;
+ policy->max = maxfreq;
+ policy->cpuinfo.min_freq = maxfreq / max_duration;
+ policy->cpuinfo.max_freq = maxfreq;
+
+ return 0;
+}
+
+/*
+ * cpufreq_gx_init:
+ * MediaGX/Geode GX initialize cpufreq driver
+ */
+static struct cpufreq_driver gx_suspmod_driver = {
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .get = gx_get_cpuspeed,
+ .verify = cpufreq_gx_verify,
+ .target = cpufreq_gx_target,
+ .init = cpufreq_gx_cpu_init,
+ .name = "gx-suspmod",
+};
+
+static int __init cpufreq_gx_init(void)
+{
+ int ret;
+ struct gxfreq_params *params;
+ struct pci_dev *gx_pci;
+
+ /* Test if we have the right hardware */
+ gx_pci = gx_detect_chipset();
+ if (gx_pci == NULL)
+ return -ENODEV;
+
+ /* check whether module parameters are sane */
+ if (max_duration > 0xff)
+ max_duration = 0xff;
+
+ pr_debug("geode suspend modulation available.\n");
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL)
+ return -ENOMEM;
+
+ params->cs55x0 = gx_pci;
+ gx_params = params;
+
+ /* keep cs55x0 configurations */
+ pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
+ pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
+ pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
+ pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
+ pci_read_config_byte(params->cs55x0, PCI_MODOFF,
+ &(params->off_duration));
+
+ ret = cpufreq_register_driver(&gx_suspmod_driver);
+ if (ret) {
+ kfree(params);
+ return ret; /* register error! */
+ }
+
+ return 0;
+}
+
+static void __exit cpufreq_gx_exit(void)
+{
+ cpufreq_unregister_driver(&gx_suspmod_driver);
+ pci_dev_put(gx_params->cs55x0);
+ kfree(gx_params);
+}
+
+MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
+MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
+MODULE_LICENSE("GPL");
+
+module_init(cpufreq_gx_init);
+module_exit(cpufreq_gx_exit);
+
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000..a45864701
--- /dev/null
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Calxeda, Inc.
+ *
+ * This driver provides the clk notifier callbacks that are used when
+ * the cpufreq-dt driver changes the frequency, to alert the highbank
+ * EnergyCore Management Engine (ECME) about the need to change
+ * voltage. The ECME interfaces with the actual voltage regulators.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/pl320-ipc.h>
+#include <linux/platform_device.h>
+
+#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+#define HB_CPUFREQ_IPC_LEN 7
+#define HB_CPUFREQ_VOLT_RETRIES 15
+
+static int hb_voltage_change(unsigned int freq)
+{
+ u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
+
+ return pl320_ipc_transmit(msg);
+}
+
+static int hb_cpufreq_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *hclk)
+{
+ struct clk_notifier_data *clk_data = hclk;
+ int i = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ if (clk_data->new_rate > clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ } else if (action == POST_RATE_CHANGE) {
+ if (clk_data->new_rate < clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
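A hedged sketch of the exchange above: hb_voltage_change() sends a seven-word
mailbox message in which only the first two words are filled in (command and
target frequency in MHz, the rest stay zero), the voltage is raised in
PRE_RATE_CHANGE before a frequency increase and lowered in POST_RATE_CHANGE
after a decrease, and each request is retried up to HB_CPUFREQ_VOLT_RETRIES
times before the change is refused with NOTIFY_BAD. For a change to 1.2 GHz:

	u32 msg[HB_CPUFREQ_IPC_LEN] = {
		HB_CPUFREQ_CHANGE_NOTE,		/* command word, 0x80000001 */
		1200000000 / 1000000,		/* 1200 MHz; remaining words stay 0 */
	};
	pl320_ipc_transmit(msg);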
+
+static struct notifier_block hb_cpufreq_clk_nb = {
+ .notifier_call = hb_cpufreq_clk_notify,
+};
+
+static int __init hb_cpufreq_driver_init(void)
+{
+ struct platform_device_info devinfo = { .name = "cpufreq-dt", };
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ struct device_node *np;
+ int ret;
+
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
+ return -ENODEV;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get highbank cpufreq device\n");
+ return -ENODEV;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ pr_err("failed to get cpu0 clock: %d\n", ret);
+ goto out_put_node;
+ }
+
+ ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
+ if (ret) {
+ pr_err("failed to register clk notifier: %d\n", ret);
+ goto out_put_node;
+ }
+
+ /* Instantiate cpufreq-dt */
+ platform_device_register_full(&devinfo);
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+module_init(hb_cpufreq_driver_init);
+
+static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = {
+ { .compatible = "calxeda,highbank" },
+ { .compatible = "calxeda,ecx-2000" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match);
+
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
new file mode 100644
index 000000000..c6bdc4555
--- /dev/null
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file provides ACPI-based P-state support. This
+ * module works with the generic cpufreq infrastructure. Most of
+ * the code is based on the i386 version
+ * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
+ *
+ * Copyright (C) 2005 Intel Corp
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/pal.h>
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+MODULE_AUTHOR("Venkatesh Pallipadi");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+struct cpufreq_acpi_io {
+ struct acpi_processor_performance acpi_data;
+ unsigned int resume;
+};
+
+struct cpufreq_acpi_req {
+ unsigned int cpu;
+ unsigned int state;
+};
+
+static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+
+static int
+processor_set_pstate (
+ u32 value)
+{
+ s64 retval;
+
+ pr_debug("processor_set_pstate\n");
+
+ retval = ia64_pal_set_pstate((u64)value);
+
+ if (retval) {
+ pr_debug("Failed to set freq to 0x%x, with error 0x%llx\n",
+ value, retval);
+ return -ENODEV;
+ }
+ return (int)retval;
+}
+
+
+static int
+processor_get_pstate (
+ u32 *value)
+{
+ u64 pstate_index = 0;
+ s64 retval;
+
+ pr_debug("processor_get_pstate\n");
+
+ retval = ia64_pal_get_pstate(&pstate_index,
+ PAL_GET_PSTATE_TYPE_INSTANT);
+ *value = (u32) pstate_index;
+
+ if (retval)
+ pr_debug("Failed to get current freq with "
+ "error 0x%llx, idx 0x%x\n", retval, *value);
+
+ return (int)retval;
+}
+
+
+/* To be used only after data->acpi_data is initialized */
+static unsigned
+extract_clock (
+ struct cpufreq_acpi_io *data,
+ unsigned value)
+{
+ unsigned long i;
+
+ pr_debug("extract_clock\n");
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ if (value == data->acpi_data.states[i].status)
+ return data->acpi_data.states[i].core_frequency;
+ }
+ return data->acpi_data.states[i-1].core_frequency;
+}
+
+
+static long
+processor_get_freq (
+ void *arg)
+{
+ struct cpufreq_acpi_req *req = arg;
+ unsigned int cpu = req->cpu;
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+ u32 value;
+ int ret;
+
+ pr_debug("processor_get_freq\n");
+ if (smp_processor_id() != cpu)
+ return -EAGAIN;
+
+ /* processor_get_pstate gets the instantaneous frequency */
+ ret = processor_get_pstate(&value);
+ if (ret) {
+ pr_warn("get performance failed with error %d\n", ret);
+ return ret;
+ }
+ return 1000 * extract_clock(data, value);
+}
+
+
+static long
+processor_set_freq (
+ void *arg)
+{
+ struct cpufreq_acpi_req *req = arg;
+ unsigned int cpu = req->cpu;
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+ int ret, state = req->state;
+ u32 value;
+
+ pr_debug("processor_set_freq\n");
+ if (smp_processor_id() != cpu)
+ return -EAGAIN;
+
+ if (state == data->acpi_data.state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n", state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n", state);
+ return 0;
+ }
+ }
+
+ pr_debug("Transitioning from P%d to P%d\n",
+ data->acpi_data.state, state);
+
+ /*
+ * First we write the target state's 'control' value to the
+ * control_register.
+ */
+ value = (u32) data->acpi_data.states[state].control;
+
+ pr_debug("Transitioning to state: 0x%08x\n", value);
+
+ ret = processor_set_pstate(value);
+ if (ret) {
+ pr_warn("Transition failed with error %d\n", ret);
+ return -ENODEV;
+ }
+
+ data->acpi_data.state = state;
+ return 0;
+}
+
+
+static unsigned int
+acpi_cpufreq_get (
+ unsigned int cpu)
+{
+ struct cpufreq_acpi_req req;
+ long ret;
+
+ req.cpu = cpu;
+ ret = work_on_cpu(cpu, processor_get_freq, &req);
+
+ return ret > 0 ? (unsigned int) ret : 0;
+}
+
+
+static int
+acpi_cpufreq_target (
+ struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct cpufreq_acpi_req req;
+
+ req.cpu = policy->cpu;
+ req.state = index;
+
+ return work_on_cpu(req.cpu, processor_set_freq, &req);
+}
+
+static int
+acpi_cpufreq_cpu_init (
+ struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_acpi_io *data;
+ unsigned int result = 0;
+ struct cpufreq_frequency_table *freq_table;
+
+ pr_debug("acpi_cpufreq_cpu_init\n");
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return (-ENOMEM);
+
+ acpi_io_data[cpu] = data;
+
+ result = acpi_processor_register_performance(&data->acpi_data, cpu);
+
+ if (result)
+ goto err_free;
+
+ /* capability check */
+ if (data->acpi_data.state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if ((data->acpi_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (data->acpi_data.status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ pr_debug("Unsupported address space [%d, %d]\n",
+ (u32) (data->acpi_data.control_register.space_id),
+ (u32) (data->acpi_data.status_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ /* alloc freq_table */
+ freq_table = kcalloc(data->acpi_data.state_count + 1,
+ sizeof(*freq_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
+ policy->cpuinfo.transition_latency = 0;
+ for (i=0; i<data->acpi_data.state_count; i++) {
+ if ((data->acpi_data.states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency) {
+ policy->cpuinfo.transition_latency =
+ data->acpi_data.states[i].transition_latency * 1000;
+ }
+ }
+
+ /* table init */
+ for (i = 0; i <= data->acpi_data.state_count; i++)
+ {
+ if (i < data->acpi_data.state_count) {
+ freq_table[i].frequency =
+ data->acpi_data.states[i].core_frequency * 1000;
+ } else {
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+ }
+ }
+
+ policy->freq_table = freq_table;
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ pr_info("CPU%u - ACPI performance management activated\n", cpu);
+
+ for (i = 0; i < data->acpi_data.state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+ (i == data->acpi_data.state?'*':' '), i,
+ (u32) data->acpi_data.states[i].core_frequency,
+ (u32) data->acpi_data.states[i].power,
+ (u32) data->acpi_data.states[i].transition_latency,
+ (u32) data->acpi_data.states[i].bus_master_latency,
+ (u32) data->acpi_data.states[i].status,
+ (u32) data->acpi_data.states[i].control);
+
+ /* the first call to ->target() should result in us actually
+ * writing something to the appropriate registers. */
+ data->resume = 1;
+
+ return (result);
+
+ err_unreg:
+ acpi_processor_unregister_performance(cpu);
+ err_free:
+ kfree(data);
+ acpi_io_data[cpu] = NULL;
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_exit (
+ struct cpufreq_policy *policy)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ pr_debug("acpi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ acpi_io_data[policy->cpu] = NULL;
+ acpi_processor_unregister_performance(policy->cpu);
+ kfree(policy->freq_table);
+ kfree(data);
+ }
+
+ return (0);
+}
+
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
+ .get = acpi_cpufreq_get,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .name = "acpi-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+
+static int __init
+acpi_cpufreq_init (void)
+{
+ pr_debug("acpi_cpufreq_init\n");
+
+ return cpufreq_register_driver(&acpi_cpufreq_driver);
+}
+
+
+static void __exit
+acpi_cpufreq_exit (void)
+{
+ pr_debug("acpi_cpufreq_exit\n");
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+}
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
new file mode 100644
index 000000000..535867a7d
--- /dev/null
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+#define OCOTP_CFG3_SPEED_GRADE_SHIFT 8
+#define OCOTP_CFG3_SPEED_GRADE_MASK (0x3 << 8)
+#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
+#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
+#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
+
+#define IMX7ULP_MAX_RUN_FREQ 528000
+
+/* cpufreq-dt device registered by imx-cpufreq-dt */
+static struct platform_device *cpufreq_dt_pdev;
+static struct device *cpu_dev;
+static int cpufreq_opp_token;
+
+enum IMX7ULP_CPUFREQ_CLKS {
+ ARM,
+ CORE,
+ SCS_SEL,
+ HSRUN_CORE,
+ HSRUN_SCS_SEL,
+ FIRC,
+};
+
+static struct clk_bulk_data imx7ulp_clks[] = {
+ { .id = "arm" },
+ { .id = "core" },
+ { .id = "scs_sel" },
+ { .id = "hsrun_core" },
+ { .id = "hsrun_scs_sel" },
+ { .id = "firc" },
+};
+
+static unsigned int imx7ulp_get_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return clk_get_rate(imx7ulp_clks[FIRC].clk);
+}
+
+static int imx7ulp_target_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int newfreq = policy->freq_table[index].frequency;
+
+ clk_set_parent(imx7ulp_clks[SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+ clk_set_parent(imx7ulp_clks[HSRUN_SCS_SEL].clk, imx7ulp_clks[FIRC].clk);
+
+ if (newfreq > IMX7ULP_MAX_RUN_FREQ)
+ clk_set_parent(imx7ulp_clks[ARM].clk,
+ imx7ulp_clks[HSRUN_CORE].clk);
+ else
+ clk_set_parent(imx7ulp_clks[ARM].clk, imx7ulp_clks[CORE].clk);
+
+ return 0;
+}
+
+static struct cpufreq_dt_platform_data imx7ulp_data = {
+ .target_intermediate = imx7ulp_target_intermediate,
+ .get_intermediate = imx7ulp_get_intermediate,
+};
+
+static int imx_cpufreq_dt_probe(struct platform_device *pdev)
+{
+ struct platform_device *dt_pdev;
+ u32 cell_value, supported_hw[2];
+ int speed_grade, mkt_segment;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+
+ if (!of_property_present(cpu_dev->of_node, "cpu-supply"))
+ return -ENODEV;
+
+ if (of_machine_is_compatible("fsl,imx7ulp")) {
+ ret = clk_bulk_get(cpu_dev, ARRAY_SIZE(imx7ulp_clks),
+ imx7ulp_clks);
+ if (ret)
+ return ret;
+
+ dt_pdev = platform_device_register_data(NULL, "cpufreq-dt",
+ -1, &imx7ulp_data,
+ sizeof(imx7ulp_data));
+ if (IS_ERR(dt_pdev)) {
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
+ ret = PTR_ERR(dt_pdev);
+ dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
+ return ret;
+ }
+
+ cpufreq_dt_pdev = dt_pdev;
+
+ return 0;
+ }
+
+ ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
+ if (ret)
+ return ret;
+
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
+ speed_grade = (cell_value & IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK)
+ >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+ else
+ speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
+ >> OCOTP_CFG3_SPEED_GRADE_SHIFT;
+
+ if (of_machine_is_compatible("fsl,imx8mp"))
+ mkt_segment = (cell_value & IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+ else
+ mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+
+ /*
+	 * Early samples without fuses written report "0 0", which may NOT
+	 * match any OPP defined in the DT. So clamp to the minimum OPP
+	 * defined in the DT to avoid a warning about "no OPPs".
+ *
+ * Applies to i.MX8M series SoCs.
+ */
+ if (mkt_segment == 0 && speed_grade == 0) {
+ if (of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq"))
+ speed_grade = 1;
+ if (of_machine_is_compatible("fsl,imx8mn") ||
+ of_machine_is_compatible("fsl,imx8mp"))
+ speed_grade = 0xb;
+ }
+
+ supported_hw[0] = BIT(speed_grade);
+ supported_hw[1] = BIT(mkt_segment);
+ dev_info(&pdev->dev, "cpu speed grade %d mkt segment %d supported-hw %#x %#x\n",
+ speed_grade, mkt_segment, supported_hw[0], supported_hw[1]);
+
+ cpufreq_opp_token = dev_pm_opp_set_supported_hw(cpu_dev, supported_hw, 2);
+ if (cpufreq_opp_token < 0) {
+ ret = cpufreq_opp_token;
+ dev_err(&pdev->dev, "Failed to set supported opp: %d\n", ret);
+ return ret;
+ }
+
+ cpufreq_dt_pdev = platform_device_register_data(
+ &pdev->dev, "cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dt_pdev)) {
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ dev_err(&pdev->dev, "Failed to register cpufreq-dt: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_cpufreq_dt_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(cpufreq_dt_pdev);
+ if (!of_machine_is_compatible("fsl,imx7ulp"))
+ dev_pm_opp_put_supported_hw(cpufreq_opp_token);
+ else
+ clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
+
+ return 0;
+}
+
+static struct platform_driver imx_cpufreq_dt_driver = {
+ .probe = imx_cpufreq_dt_probe,
+ .remove = imx_cpufreq_dt_remove,
+ .driver = {
+ .name = "imx-cpufreq-dt",
+ },
+};
+module_platform_driver(imx_cpufreq_dt_driver);
+
+MODULE_ALIAS("platform:imx-cpufreq-dt");
+MODULE_DESCRIPTION("Freescale i.MX cpufreq speed grading driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000..39b0362a3
--- /dev/null
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define PU_SOC_VOLTAGE_NORMAL 1250000
+#define PU_SOC_VOLTAGE_HIGH 1275000
+#define FREQ_1P2_GHZ 1200000000
+
+static struct regulator *arm_reg;
+static struct regulator *pu_reg;
+static struct regulator *soc_reg;
+
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
+
+static struct device *cpu_dev;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int max_freq;
+static unsigned int transition_latency;
+
+static u32 *imx6_soc_volt;
+static u32 soc_opp_count;
+
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct dev_pm_opp *opp;
+ unsigned long freq_hz, volt, volt_old;
+ unsigned int old_freq, new_freq;
+ bool pll1_sys_temp_enabled = false;
+ int ret;
+
+ new_freq = freq_table[index].frequency;
+ freq_hz = new_freq * 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
+
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+ return PTR_ERR(opp);
+ }
+
+ volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ volt_old = regulator_get_voltage(arm_reg);
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old / 1000,
+ new_freq / 1000, volt / 1000);
+
+ /* scaling up? scale voltage before frequency */
+ if (new_freq > old_freq) {
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
+ }
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale vddarm up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /*
+	 * The setpoints are selected per PLL/PFD frequencies, so we need to
+	 * reprogram the PLL for frequency scaling. The procedure for
+	 * reprogramming PLL1 is as below.
+	 * i.MX6UL has a secondary clk mux, so its cpu frequency change
+	 * flow is slightly different from the other i.MX6 SoCs.
+	 * The cpu frequency change flow for i.MX6 (except i.MX6UL) is as below:
+ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+ * - Disable pll2_pfd2_396m_clk
+ */
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull")) {
+ /*
+		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
+		 * the CPU may run at more than 528MHz, which can make the
+		 * system unstable if the voltage is lower than the
+		 * voltage required for 528MHz, so lower the CPU frequency
+		 * to one half before changing it.
+ */
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
+ else
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
+ } else {
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ } else {
+ /* pll1_sys needs to be enabled for divider rate change to work. */
+ pll1_sys_temp_enabled = true;
+ clk_prepare_enable(clks[PLL1_SYS].clk);
+ }
+ }
+
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
+ if (ret) {
+ int ret1;
+
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ if (ret1)
+ dev_warn(cpu_dev,
+ "failed to restore vddarm voltage: %d\n", ret1);
+ return ret;
+ }
+
+ /* PLL1 is only needed until after ARM-PODF is set. */
+ if (pll1_sys_temp_enabled)
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
+
+ /* scaling down? scale voltage after frequency */
+ if (new_freq < old_freq) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret)
+ dev_warn(cpu_dev,
+ "failed to scale vddarm down: %d\n", ret);
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret)
+ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret)
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ }
+ }
+
+ return 0;
+}
+
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = clks[ARM].clk;
+ cpufreq_generic_init(policy, freq_table, transition_latency);
+ policy->suspend_freq = max_freq;
+
+ return 0;
+}
+
+static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = imx6q_set_target,
+ .get = cpufreq_generic_get,
+ .init = imx6q_cpufreq_init,
+ .register_em = cpufreq_register_em_with_opp,
+ .name = "imx6q-cpufreq",
+ .attr = cpufreq_generic_attr,
+ .suspend = cpufreq_generic_suspend,
+};
+
+static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
+{
+ int ret = dev_pm_opp_disable(dev, freq);
+
+ if (ret < 0 && ret != -ENODEV)
+ dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
+}
+
+#define OCOTP_CFG3 0x440
+#define OCOTP_CFG3_SPEED_SHIFT 16
+#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
+#define OCOTP_CFG3_SPEED_996MHZ 0x2
+#define OCOTP_CFG3_SPEED_852MHZ 0x1
+
+static int imx6q_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+ int ret;
+
+ if (of_property_present(dev->of_node, "nvmem-cells")) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np)
+ return -ENOENT;
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ return -EFAULT;
+ }
+
+ /*
+ * SPEED_GRADING[1:0] defines the max speed of ARM:
+ * 2b'11: 1200000000Hz;
+ * 2b'10: 996000000Hz;
+ * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
+ * 2b'00: 792000000Hz;
+			 * We need to set the max speed of ARM according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ iounmap(base);
+ }
+
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+
+ if (val < OCOTP_CFG3_SPEED_996MHZ)
+ imx6x_disable_freq_in_opp(dev, 996000000);
+
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp")) {
+ if (val != OCOTP_CFG3_SPEED_852MHZ)
+ imx6x_disable_freq_in_opp(dev, 852000000);
+
+ if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+ imx6x_disable_freq_in_opp(dev, 1200000000);
+ }
+
+ return 0;
+}
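As a hedged illustration of the fuse decode above (the fuse word below is
hypothetical): bits [17:16] of OCOTP_CFG3 carry the speed grade, and any grade
below the 996 MHz one causes the 996 MHz OPP to be disabled:

	u32 cfg3 = 0x00030000;				/* hypothetical fuse word */
	u32 grade = (cfg3 >> OCOTP_CFG3_SPEED_SHIFT) & 0x3;
	/* grade == OCOTP_CFG3_SPEED_1P2GHZ (0x3): 996 MHz and 1.2 GHz stay enabled
	 * while 852 MHz is dropped (exclusive with 996 MHz on i.MX6Q/QP); a grade
	 * of 0x2 would also drop the 1.2 GHz OPP, and 0x0/0x1 drop 996 MHz too. */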
+
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2
+#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3
+
+static int imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ u32 val;
+ int ret = 0;
+
+ if (of_property_present(dev->of_node, "nvmem-cells")) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+ } else {
+ struct device_node *np;
+ void __iomem *base;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,imx6ull-ocotp");
+ if (!np)
+ return -ENOENT;
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ return -EFAULT;
+ }
+
+ val = readl_relaxed(base + OCOTP_CFG3);
+ iounmap(base);
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz on i.MX6UL, 792000000Hz on i.MX6ULL;
+ * 2b'11: 900000000Hz on i.MX6ULL only;
+	 * We need to set the max speed of ARM according to the fuse map.
+ */
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ imx6x_disable_freq_in_opp(dev, 696000000);
+
+ if (of_machine_is_compatible("fsl,imx6ull")) {
+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ imx6x_disable_freq_in_opp(dev, 792000000);
+
+ if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+ imx6x_disable_freq_in_opp(dev, 900000000);
+ }
+
+ return ret;
+}
+
+static int imx6q_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct dev_pm_opp *opp;
+ unsigned long min_volt, max_volt;
+ int num, ret;
+ const struct property *prop;
+ const __be32 *val;
+ u32 nr, i, j;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu0 device\n");
+ return -ENODEV;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu0 node\n");
+ return -ENOENT;
+ }
+
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
+
+ arm_reg = regulator_get(cpu_dev, "arm");
+ pu_reg = regulator_get_optional(cpu_dev, "pu");
+ soc_reg = regulator_get(cpu_dev, "soc");
+ if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
+ PTR_ERR(soc_reg) == -EPROBE_DEFER ||
+ PTR_ERR(pu_reg) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ dev_dbg(cpu_dev, "regulators not ready, defer\n");
+ goto put_reg;
+ }
+ if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
+ dev_err(cpu_dev, "failed to get regulators\n");
+ ret = -ENOENT;
+ goto put_reg;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_reg;
+ }
+
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull")) {
+ ret = imx6ul_opp_check_speed_grading(cpu_dev);
+ } else {
+ ret = imx6q_opp_check_speed_grading(cpu_dev);
+ }
+ if (ret) {
+ dev_err_probe(cpu_dev, ret, "failed to read ocotp\n");
+ goto out_free_opp;
+ }
+
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_opp;
+ }
+
+	/* Make the imx6_soc_volt array the same size as the number of ARM OPPs */
+ imx6_soc_volt = devm_kcalloc(cpu_dev, num, sizeof(*imx6_soc_volt),
+ GFP_KERNEL);
+ if (imx6_soc_volt == NULL) {
+ ret = -ENOMEM;
+ goto free_freq_table;
+ }
+
+ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+ if (!prop || !prop->value)
+ goto soc_opp_out;
+
+ /*
+	 * The property is a list of <freq-kHz vol-uV> tuples, each giving a
+	 * frequency and its matching voltage.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2 || (nr / 2) < num)
+ goto soc_opp_out;
+
+ for (j = 0; j < num; j++) {
+ val = prop->value;
+ for (i = 0; i < nr / 2; i++) {
+ unsigned long freq = be32_to_cpup(val++);
+ unsigned long volt = be32_to_cpup(val++);
+ if (freq_table[j].frequency == freq) {
+ imx6_soc_volt[soc_opp_count++] = volt;
+ break;
+ }
+ }
+ }
+
+soc_opp_out:
+	/* Use a fixed SOC OPP voltage if no valid SOC OPP info is found in the DTB */
+ if (soc_opp_count != num) {
+ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+ for (j = 0; j < num; j++)
+ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+ }
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /*
+ * Calculate the ramp time for max voltage change in the
+ * VDDSOC and VDDPU regulators.
+ */
+ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ max_freq = freq_table[--num].frequency;
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev, max_freq * 1000, true);
+ max_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+ if (ret) {
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ goto free_freq_table;
+ }
+
+ of_node_put(np);
+ return 0;
+
+free_freq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_opp:
+ dev_pm_opp_of_remove_table(cpu_dev);
+put_reg:
+ if (!IS_ERR(arm_reg))
+ regulator_put(arm_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
+ if (!IS_ERR(soc_reg))
+ regulator_put(soc_reg);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int imx6q_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+ dev_pm_opp_of_remove_table(cpu_dev);
+ regulator_put(arm_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
+ regulator_put(soc_reg);
+
+ clk_bulk_put(num_clks, clks);
+
+ return 0;
+}
+
+static struct platform_driver imx6q_cpufreq_platdrv = {
+ .driver = {
+ .name = "imx6q-cpufreq",
+ },
+ .probe = imx6q_cpufreq_probe,
+ .remove = imx6q_cpufreq_remove,
+};
+module_platform_driver(imx6q_cpufreq_platdrv);
+
+MODULE_ALIAS("platform:imx6q-cpufreq");
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000..abdd26f7d
--- /dev/null
+++ b/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,3537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_pstate.c: Native P state management for Intel processors
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/list.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/acpi.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_qos.h>
+#include <trace/events/power.h>
+
+#include <asm/cpu.h>
+#include <asm/div64.h>
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
+
+#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
+
+#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000
+#define INTEL_CPUFREQ_TRANSITION_DELAY 500
+
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
+#endif
+
+#define FRAC_BITS 8
+#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
+#define fp_toint(X) ((X) >> FRAC_BITS)
+
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
+#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
+
+static inline int32_t mul_fp(int32_t x, int32_t y)
+{
+ return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+}
+
+static inline int32_t div_fp(s64 x, s64 y)
+{
+ return div64_s64((int64_t)x << FRAC_BITS, y);
+}
+
+static inline int ceiling_fp(int32_t x)
+{
+ int mask, ret;
+
+ ret = fp_toint(x);
+ mask = (1 << FRAC_BITS) - 1;
+ if (x & mask)
+ ret += 1;
+ return ret;
+}
+
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+ return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+ return div64_u64(x << EXT_FRAC_BITS, y);
+}
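The fixed-point helpers above keep FRAC_BITS = 8 fractional bits, so 1.0 is
represented as 256. A small hedged sketch of the arithmetic (the values are
just worked examples, not driver code):

	int32_t three = int_tofp(3);			/* 768, i.e. 3.0 */
	int32_t ratio = div_fp(3, 2);			/* 384, i.e. 1.5 */
	int32_t six   = mul_fp(three, int_tofp(2));	/* 1536, i.e. 6.0 */
	int     down  = fp_toint(ratio);		/* 1, truncated */
	int     up    = ceiling_fp(ratio);		/* 2, rounded up */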
+
+/**
+ * struct sample - Store performance sample
+ * @core_avg_perf: Ratio of APERF/MPERF which is the actual average
+ * performance during last sample period
+ * @busy_scaled: Scaled busy value which is used to calculate next
+ * P state. This can be different than core_avg_perf
+ *			P state. This can be different from core_avg_perf
+ * @aperf: Difference of actual performance frequency clock count
+ * read from APERF MSR between last and current sample
+ * @mperf: Difference of maximum performance frequency clock count
+ * read from MPERF MSR between last and current sample
+ * @tsc: Difference of time stamp counter between last and
+ * current sample
+ * @time: Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing next P State.
+ */
+struct sample {
+ int32_t core_avg_perf;
+ int32_t busy_scaled;
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+ u64 time;
+};
+
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate: Current requested P state
+ * @min_pstate: Min P state possible for this platform
+ * @max_pstate: Max P state possible for this platform
+ * @max_pstate_physical:This is physical Max P state for a processor
+ * This can be higher than the max_pstate which can
+ * be limited by platform thermal design power limits
+ * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor
+ * @scaling: Scaling factor between performance and frequency
+ * @turbo_pstate: Max Turbo P state possible for this platform
+ * @min_freq: @min_pstate frequency in cpufreq units
+ * @max_freq: @max_pstate frequency in cpufreq units
+ * @turbo_freq: @turbo_pstate frequency in cpufreq units
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int max_pstate_physical;
+ int perf_ctl_scaling;
+ int scaling;
+ int turbo_pstate;
+ unsigned int min_freq;
+ unsigned int max_freq;
+ unsigned int turbo_freq;
+};
+
+/**
+ * struct vid_data - Stores voltage information data
+ * @min: VID data for this platform corresponding to
+ * the lowest P state
+ * @max: VID data corresponding to the highest P State.
+ * @turbo: VID data for turbo P state
+ * @ratio: Ratio of (vid max - vid min) /
+ * (max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
+ * This data is used on Atom platforms, where in addition to the target
+ * P state, the voltage data needs to be specified to select the next P state.
+ */
+struct vid_data {
+ int min;
+ int max;
+ int turbo;
+ int32_t ratio;
+};
+
+/**
+ * struct global_params - Global parameters, mostly tunable via sysfs.
+ * @no_turbo: Whether or not to use turbo P-states.
+ * @turbo_disabled: Whether or not turbo P-states are available at all,
+ * based on the MSR_IA32_MISC_ENABLE value and whether or
+ * not the maximum reported turbo P-state is different from
+ * the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
+ * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
+ * P-state capacity.
+ * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
+ * P-state capacity.
+ */
+struct global_params {
+ bool no_turbo;
+ bool turbo_disabled;
+ bool turbo_disabled_mf;
+ int max_perf_pct;
+ int min_perf_pct;
+};
+
+/**
+ * struct cpudata - Per CPU instance data storage
+ * @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
+ * @update_util: CPUFreq utility callback information
+ * @update_util_set: CPUFreq utility callback is set
+ * @iowait_boost: iowait-related boost fraction
+ * @last_update: Time of the last update.
+ * @pstate: Stores P state limits for this CPU
+ * @vid: Stores VID limits for this CPU
+ * @last_sample_time: Last Sample time
+ * @aperf_mperf_shift: APERF vs MPERF counting frequency difference
+ * @prev_aperf: Last APERF value read from APERF MSR
+ * @prev_mperf: Last MPERF value read from MPERF MSR
+ * @prev_tsc: Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO Wait time difference from last and
+ * current sample
+ * @sample: Storage for storing last Sample data
+ * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
+ * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
+ * @acpi_perf_data: Stores ACPI perf information read from _PSS
+ * @valid_pss_table: Set to true for valid ACPI _PSS entries found
+ * @epp_powersave: Last saved HWP energy performance preference
+ * (EPP) or energy performance bias (EPB),
+ * when policy switched to performance
+ * @epp_policy: Last saved policy used to set EPP/EPB
+ * @epp_default: Power on default HWP energy performance
+ * preference/bias
+ * @epp_cached Cached HWP energy-performance preference value
+ * @hwp_req_cached: Cached value of the last HWP Request MSR
+ * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
+ * @last_io_update: Last time when IO wake flag was set
+ * @sched_flags: Store scheduler flags for possible cross CPU update
+ * @hwp_boost_min: Last HWP boosted min performance
+ * @suspended: Whether or not the driver has been suspended.
+ * @hwp_notify_work: workqueue for HWP notifications.
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
+struct cpudata {
+ int cpu;
+
+ unsigned int policy;
+ struct update_util_data update_util;
+ bool update_util_set;
+
+ struct pstate_data pstate;
+ struct vid_data vid;
+
+ u64 last_update;
+ u64 last_sample_time;
+ u64 aperf_mperf_shift;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ u64 prev_tsc;
+ u64 prev_cummulative_iowait;
+ struct sample sample;
+ int32_t min_perf_ratio;
+ int32_t max_perf_ratio;
+#ifdef CONFIG_ACPI
+ struct acpi_processor_performance acpi_perf_data;
+ bool valid_pss_table;
+#endif
+ unsigned int iowait_boost;
+ s16 epp_powersave;
+ s16 epp_policy;
+ s16 epp_default;
+ s16 epp_cached;
+ u64 hwp_req_cached;
+ u64 hwp_cap_cached;
+ u64 last_io_update;
+ unsigned int sched_flags;
+ u32 hwp_boost_min;
+ bool suspended;
+ struct delayed_work hwp_notify_work;
+};
+
+static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max: Callback to get maximum non turbo effective P state
+ * @get_max_physical: Callback to get maximum non turbo physical P state
+ * @get_min: Callback to get minimum P state
+ * @get_turbo: Callback to get turbo P state
+ * @get_scaling: Callback to get frequency scaling factor
+ * @get_cpu_scaling: Get frequency scaling factor for a given cpu
+ * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
+ * @get_val: Callback to convert P state to actual MSR write value
+ * @get_vid: Callback to get VID data for Atom platforms
+ *
+ * Core and Atom CPU models have different ways to get P State limits. This
+ * structure is used to store those callbacks.
+ */
+struct pstate_funcs {
+ int (*get_max)(int cpu);
+ int (*get_max_physical)(int cpu);
+ int (*get_min)(int cpu);
+ int (*get_turbo)(int cpu);
+ int (*get_scaling)(void);
+ int (*get_cpu_scaling)(int cpu);
+ int (*get_aperf_mperf_shift)(void);
+ u64 (*get_val)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+};
+
+static struct pstate_funcs pstate_funcs __read_mostly;
+
+static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
+static bool per_cpu_limits __read_mostly;
+static bool hwp_boost __read_mostly;
+
+static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
+
+static struct global_params global;
+
+static DEFINE_MUTEX(intel_pstate_driver_lock);
+static DEFINE_MUTEX(intel_pstate_limits_lock);
+
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_acpi_pm_profile_server(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+ acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+ return true;
+
+ return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+ if (intel_pstate_acpi_pm_profile_server())
+ return true;
+
+ return acpi_ppc;
+}
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+
+#define CPPC_MAX_PERF U8_MAX
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return;
+
+ /*
+ * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+	 * In that case CPPC.highest_perf cannot be used to enable ITMT, so
+	 * look at MSR_HWP_CAPABILITIES bits [8:0] to decide instead.
+ */
+ if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+ if (max_highest_perf <= min_highest_perf) {
+ if (cppc_perf.highest_perf > max_highest_perf)
+ max_highest_perf = cppc_perf.highest_perf;
+
+ if (cppc_perf.highest_perf < min_highest_perf)
+ min_highest_perf = cppc_perf.highest_perf;
+
+ if (max_highest_perf > min_highest_perf) {
+ /*
+ * This code can be run during CPU online under the
+ * CPU hotplug locks, so sched_set_itmt_support()
+ * cannot be called from here. Queue up a work item
+ * to invoke it.
+ */
+ schedule_work(&sched_itmt_work);
+ }
+ }
+}
+
+static int intel_pstate_get_cppc_guaranteed(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ if (cppc_perf.guaranteed_perf)
+ return cppc_perf.guaranteed_perf;
+
+ return cppc_perf.nominal_perf;
+}
+#else /* CONFIG_ACPI_CPPC_LIB */
+static inline void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
+
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int ret;
+ int i;
+
+ if (hwp_active) {
+ intel_pstate_set_itmt_prio(policy->cpu);
+ return;
+ }
+
+ if (!intel_pstate_get_ppc_enable_status())
+ return;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+ policy->cpu);
+ if (ret)
+ return;
+
+ /*
+ * Check if the control value in _PSS is for PERF_CTL MSR, which should
+ * guarantee that the states returned by it map to the states in our
+ * list directly.
+ */
+ if (cpu->acpi_perf_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+ goto err;
+
+ /*
+	 * If _PSS has only one entry, simply ignore it and continue as
+	 * usual without taking _PSS into account.
+ */
+ if (cpu->acpi_perf_data.state_count < 2)
+ goto err;
+
+ pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+ for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+ pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
+ (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+ (u32) cpu->acpi_perf_data.states[i].core_frequency,
+ (u32) cpu->acpi_perf_data.states[i].power,
+ (u32) cpu->acpi_perf_data.states[i].control);
+ }
+
+ cpu->valid_pss_table = true;
+ pr_debug("_PPC limits will be enforced\n");
+
+ return;
+
+ err:
+ cpu->valid_pss_table = false;
+ acpi_processor_unregister_performance(policy->cpu);
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[policy->cpu];
+ if (!cpu->valid_pss_table)
+ return;
+
+ acpi_processor_unregister_performance(policy->cpu);
+}
+#else /* CONFIG_ACPI */
+static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
+#ifndef CONFIG_ACPI_CPPC_LIB
+static inline int intel_pstate_get_cppc_guaranteed(int cpu)
+{
+ return -ENOTSUPP;
+}
+#endif /* CONFIG_ACPI_CPPC_LIB */
+
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+ unsigned int relation)
+{
+ if (freq == cpu->pstate.turbo_freq)
+ return cpu->pstate.turbo_pstate;
+
+ if (freq == cpu->pstate.max_freq)
+ return cpu->pstate.max_pstate;
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ return freq / cpu->pstate.scaling;
+ case CPUFREQ_RELATION_C:
+ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+ }
+
+ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
+/**
+ * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
+ * @cpu: Target CPU.
+ *
+ * On hybrid processors, HWP may expose more performance levels than there are
+ * P-states accessible through the PERF_CTL interface. If that happens, the
+ * scaling factor between HWP performance levels and CPU frequency will be less
+ * than the scaling factor between P-state values and CPU frequency.
+ *
+ * In that case, adjust the CPU parameters used in computations accordingly.
+ */
+static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
+{
+ int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
+ int scaling = cpu->pstate.scaling;
+ int freq;
+
+ pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+ pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
+ pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
+ pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+
+ cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
+ perf_ctl_scaling);
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
+ perf_ctl_scaling);
+
+ freq = perf_ctl_max_phys * perf_ctl_scaling;
+ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
+
+ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.min_freq = freq;
+ /*
+ * Cast the min P-state value retrieved via pstate_funcs.get_min() to
+ * the effective range of HWP performance levels.
+ */
+ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
+}
+
+static inline void update_turbo_state(void)
+{
+ u64 misc_en;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[0];
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ global.turbo_disabled =
+ (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
+}
+
+static int min_perf_pct_min(void)
+{
+ struct cpudata *cpu = all_cpu_data[0];
+ int turbo_pstate = cpu->pstate.turbo_pstate;
+
+ return turbo_pstate ?
+ (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
+}
+
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+ u64 epb;
+ int ret;
+
+ if (!boot_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return (s16)ret;
+
+ return (s16)(epb & 0x0f);
+}
+
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+ s16 epp;
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+		 * When hwp_req_data is 0, the caller did not read
+		 * MSR_HWP_REQUEST, so read it here to get the EPP.
+ */
+ if (!hwp_req_data) {
+ epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ &hwp_req_data);
+ if (epp)
+ return epp;
+ }
+ epp = (hwp_req_data >> 24) & 0xff;
+ } else {
+ /* When there is no EPP present, HWP uses EPB settings */
+ epp = intel_pstate_get_epb(cpu_data);
+ }
+
+ return epp;
+}
+
+static int intel_pstate_set_epb(int cpu, s16 pref)
+{
+ u64 epb;
+ int ret;
+
+ if (!boot_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return ret;
+
+ epb = (epb & ~0x0f) | pref;
+ wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+
+ return 0;
+}
+
+/*
+ * EPP/EPB display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+ NULL
+};
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0, /* Unused index */
+ [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE,
+};
+
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ *raw_epp = 0;
+ epp = intel_pstate_get_epp(cpu_data, 0);
+ if (epp < 0)
+ return epp;
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (epp == epp_values[EPP_INDEX_PERFORMANCE])
+ return EPP_INDEX_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE])
+ return EPP_INDEX_BALANCE_PERFORMANCE;
+ if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE])
+ return EPP_INDEX_BALANCE_POWERSAVE;
+ if (epp == epp_values[EPP_INDEX_POWERSAVE])
+ return EPP_INDEX_POWERSAVE;
+ *raw_epp = epp;
+ return 0;
+ } else if (boot_cpu_has(X86_FEATURE_EPB)) {
+ /*
+ * Range:
+ * 0x00-0x03 : Performance
+ * 0x04-0x07 : Balance performance
+ * 0x08-0x0B : Balance power
+ * 0x0C-0x0F : Power
+		 * The EPB is a 4-bit value, but our ranges restrict the
+		 * values which can be set; effectively only the top two
+		 * bits are used here.
+ */
+ index = (epp >> 2) + 1;
+ }
+
+ return index;
+}
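For the EPB fallback path above, a hedged worked example of the index mapping
(the EPB readings are hypothetical):

	/* EPB 0x06 (balance-performance range 0x04-0x07): (0x06 >> 2) + 1 == 2,
	 * i.e. energy_perf_strings[EPP_INDEX_BALANCE_PERFORMANCE];
	 * EPB 0x0e (power range 0x0C-0x0F): (0x0e >> 2) + 1 == 4,
	 * i.e. energy_perf_strings[EPP_INDEX_POWERSAVE]. */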
+
+static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
+{
+ int ret;
+
+ /*
+ * Use the cached HWP Request MSR value, because in the active mode the
+ * register itself may be updated by intel_pstate_hwp_boost_up() or
+ * intel_pstate_hwp_boost_down() at any time.
+ */
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ /*
+ * The only other updater of hwp_req_cached in the active mode,
+ * intel_pstate_hwp_set(), is called under the same lock as this
+ * function, so it cannot run in parallel with the update below.
+ */
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ if (!ret)
+ cpu->epp_cached = epp;
+
+ return ret;
+}
+
+static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ int pref_index, bool use_raw,
+ u32 raw_epp)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index)
+ epp = cpu_data->epp_default;
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (use_raw)
+ epp = raw_epp;
+ else if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+
+ /*
+ * To avoid confusion, refuse to set EPP to any values different
+ * from 0 (performance) if the current policy is "performance",
+ * because those values would be overridden.
+ */
+ if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return -EBUSY;
+
+ ret = intel_pstate_set_epp(cpu_data, epp);
+ } else {
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 2;
+ ret = intel_pstate_set_epb(cpu_data->cpu, epp);
+ }
+
+ return ret;
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int ret = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
+
+ ret += sprintf(&buf[ret], "\n");
+
+ return ret;
+}
+
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+
+static struct cpufreq_driver intel_pstate;
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ char str_preference[21];
+ bool raw = false;
+ ssize_t ret;
+ u32 epp = 0;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0) {
+ if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return ret;
+
+ ret = kstrtouint(buf, 10, &epp);
+ if (ret)
+ return ret;
+
+ if (epp > 255)
+ return -EINVAL;
+
+ raw = true;
+ }
+
+ /*
+ * This function runs with the policy R/W semaphore held, which
+ * guarantees that the driver pointer will not change while it is
+ * running.
+ */
+ if (!intel_pstate_driver)
+ return -EAGAIN;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (intel_pstate_driver == &intel_pstate) {
+ ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
+ } else {
+ /*
+ * In the passive mode the governor needs to be stopped on the
+ * target CPU before the EPP update and restarted after it,
+ * which is super-heavy-weight, so make sure it is worth doing
+ * upfront.
+ */
+ if (!raw)
+ epp = ret ? epp_values[ret] : cpu->epp_default;
+
+ if (cpu->epp_cached != epp) {
+ int err;
+
+ cpufreq_stop_governor(policy);
+ ret = intel_pstate_set_epp(cpu, epp);
+ err = cpufreq_start_governor(policy);
+ if (!ret)
+ ret = err;
+ } else {
+ ret = 0;
+ }
+ }
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret ?: count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+ int preference, raw_epp;
+
+ preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
+ if (preference < 0)
+ return preference;
+
+ if (raw_epp)
+ return sprintf(buf, "%d\n", raw_epp);
+ else
+ return sprintf(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+cpufreq_freq_attr_rw(energy_performance_preference);
+
+static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int ratio, freq;
+
+ ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
+ if (ratio <= 0) {
+ u64 cap;
+
+ rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ ratio = HWP_GUARANTEED_PERF(cap);
+ }
+
+ freq = ratio * cpu->pstate.scaling;
+ if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling)
+ freq = rounddown(freq, cpu->pstate.perf_ctl_scaling);
+
+ return sprintf(buf, "%d\n", freq);
+}
+
+cpufreq_freq_attr_ro(base_frequency);
+
+static struct freq_attr *hwp_cpufreq_attrs[] = {
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ &base_frequency,
+ NULL,
+};
+
+static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
+{
+ u64 cap;
+
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(cpu->hwp_cap_cached, cap);
+ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
+ cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
+}
+
+static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
+{
+ int scaling = cpu->pstate.scaling;
+
+ __intel_pstate_get_hwp_cap(cpu);
+
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+ if (scaling != cpu->pstate.perf_ctl_scaling) {
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+
+ cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq,
+ perf_ctl_scaling);
+ cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq,
+ perf_ctl_scaling);
+ }
+}
+
+static void intel_pstate_hwp_set(unsigned int cpu)
+{
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ int max, min;
+ u64 value;
+ s16 epp;
+
+ max = cpu_data->max_perf_ratio;
+ min = cpu_data->min_perf_ratio;
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min = max;
+
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(min);
+
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(max);
+
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
+
+ cpu_data->epp_policy = cpu_data->policy;
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ cpu_data->epp_powersave = epp;
+ /* If the EPP read failed, don't try to write it */
+ if (epp < 0)
+ goto skip_epp;
+
+ epp = 0;
+ } else {
+ /* skip setting EPP, when saved value is invalid */
+ if (cpu_data->epp_powersave < 0)
+ goto skip_epp;
+
+ /*
+ * No need to restore EPP when it is not zero, which means one of:
+ * - the policy has not changed,
+ * - the user has changed it manually,
+ * - reading the EPB failed.
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
+
+ epp = cpu_data->epp_powersave;
+ }
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
+ }
+skip_epp:
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
+ wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
+
+static void intel_pstate_hwp_offline(struct cpudata *cpu)
+{
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
+ int min_perf;
+
+ intel_pstate_disable_hwp_interrupt(cpu);
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+ * In case the EPP has been set to "performance" by the
+ * active mode "performance" scaling algorithm, replace that
+ * temporary value with the cached EPP one.
+ */
+ value &= ~GENMASK_ULL(31, 24);
+ value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ /*
+ * However, make sure that EPP will be set to "performance" when
+ * the CPU is brought back online again and the "performance"
+ * scaling algorithm is still in effect.
+ */
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+ }
+
+ /*
+ * Clear the desired perf field in the cached HWP request value to
+ * prevent nonzero desired values from being leaked into the active
+ * mode.
+ */
+ value &= ~HWP_DESIRED_PERF(~0L);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+
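+ /* Clear the min, max, desired and EPP fields of the request. */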
+ value &= ~GENMASK_ULL(31, 0);
+ min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
+
+ /* Set hwp_max = hwp_min */
+ value |= HWP_MAX_PERF(min_perf);
+ value |= HWP_MIN_PERF(min_perf);
+
+ /* Set EPP to min */
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
+
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
+#define POWER_CTL_EE_ENABLE 1
+#define POWER_CTL_EE_DISABLE 2
+
+static int power_ctl_ee_state;
+
+static void set_power_ctl_ee_state(bool input)
+{
+ u64 power_ctl;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ if (input) {
+ power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ power_ctl_ee_state = POWER_CTL_EE_ENABLE;
+ } else {
+ power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+ power_ctl_ee_state = POWER_CTL_EE_DISABLE;
+ }
+ wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
+static void intel_pstate_hwp_enable(struct cpudata *cpudata);
+
+static void intel_pstate_hwp_reenable(struct cpudata *cpu)
+{
+ intel_pstate_hwp_enable(cpu);
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+}
+
+static int intel_pstate_suspend(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d suspending\n", cpu->cpu);
+
+ cpu->suspended = true;
+
+ /* disable HWP interrupt and cancel any pending work */
+ intel_pstate_disable_hwp_interrupt(cpu);
+
+ return 0;
+}
+
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d resuming\n", cpu->cpu);
+
+ /* Only restore if the system default is changed */
+ if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
+ set_power_ctl_ee_state(true);
+ else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
+ set_power_ctl_ee_state(false);
+
+ if (cpu->suspended && hwp_active) {
+ mutex_lock(&intel_pstate_limits_lock);
+
+ /* Re-enable HWP, because "online" has not done that. */
+ intel_pstate_hwp_reenable(cpu);
+
+ mutex_unlock(&intel_pstate_limits_lock);
+ }
+
+ cpu->suspended = false;
+
+ return 0;
+}
+
+static void intel_pstate_update_policies(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpufreq_update_policy(cpu);
+}
+
+static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
+ struct cpufreq_policy *policy)
+{
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+ refresh_frequency_limits(policy);
+}
+
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+
+ if (!policy)
+ return;
+
+ __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
+
+ cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+ mutex_lock(&intel_pstate_driver_lock);
+
+ update_turbo_state();
+ /*
+ * If turbo has been turned on or off globally, policy limits for
+ * all CPUs need to be updated to reflect that.
+ */
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ arch_set_max_freq_ratio(global.turbo_disabled);
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
+
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
+/************************** sysfs begin ************************/
+#define show_one(file_name, object) \
+ static ssize_t show_##file_name \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%u\n", global.object); \
+ }
+
+static ssize_t intel_pstate_show_status(char *buf);
+static int intel_pstate_update_status(const char *buf, size_t size);
+
+static ssize_t show_status(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ ret = intel_pstate_show_status(buf);
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ ret = intel_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_turbo_pct(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total, no_turbo, turbo_pct;
+ uint32_t turbo_fp;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ cpu = all_cpu_data[0];
+
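+ /*
+ * Report the percentage of the supported P-state range that is only
+ * available with turbo, i.e. P-states above the maximum non-turbo one.
+ */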
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+ no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
+ turbo_fp = div_fp(no_turbo, total);
+ turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return sprintf(buf, "%u\n", turbo_pct);
+}
+
+static ssize_t show_num_pstates(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ cpu = all_cpu_data[0];
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return sprintf(buf, "%u\n", total);
+}
+
+static ssize_t show_no_turbo(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ update_turbo_state();
+ if (global.turbo_disabled)
+ ret = sprintf(buf, "%u\n", global.turbo_disabled);
+ else
+ ret = sprintf(buf, "%u\n", global.no_turbo);
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ update_turbo_state();
+ if (global.turbo_disabled) {
+ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+ mutex_unlock(&intel_pstate_limits_lock);
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EPERM;
+ }
+
+ global.no_turbo = clamp_t(int, input, 0, 1);
+
+ if (global.no_turbo) {
+ struct cpudata *cpu = all_cpu_data[0];
+ int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
+
+ /* Squash the global minimum into the permitted range. */
+ if (global.min_perf_pct > pct)
+ global.min_perf_pct = pct;
+ }
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ intel_pstate_update_policies();
+ arch_set_max_freq_ratio(global.no_turbo);
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
+static void update_qos_request(enum freq_qos_req_type type)
+{
+ struct freq_qos_request *req;
+ struct cpufreq_policy *policy;
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpudata *cpu = all_cpu_data[i];
+ unsigned int freq, perf_pct;
+
+ policy = cpufreq_cpu_get(i);
+ if (!policy)
+ continue;
+
+ req = policy->driver_data;
+ cpufreq_cpu_put(policy);
+
+ if (!req)
+ continue;
+
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpu);
+
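+ /* policy->driver_data holds a pair of requests: [0] is min, [1] is max. */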
+ if (type == FREQ_QOS_MIN) {
+ perf_pct = global.min_perf_pct;
+ } else {
+ req++;
+ perf_pct = global.max_perf_pct;
+ }
+
+ freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", i);
+ }
+}
+
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_update_policies();
+ else
+ update_qos_request(FREQ_QOS_MAX);
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&intel_pstate_driver_lock);
+
+ if (!intel_pstate_driver) {
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EAGAIN;
+ }
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ global.min_perf_pct = clamp_t(int, input,
+ min_perf_pct_min(), global.max_perf_pct);
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_update_policies();
+ else
+ update_qos_request(FREQ_QOS_MIN);
+
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
+static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hwp_boost);
+}
+
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+ struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ hwp_boost = !!input;
+ intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
+static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ u64 power_ctl;
+ int enable;
+
+ rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
+ return sprintf(buf, "%d\n", !enable);
+}
+
+static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ bool input;
+ int ret;
+
+ ret = kstrtobool(buf, &input);
+ if (ret)
+ return ret;
+
+ set_power_ctl_ee_state(input);
+
+ return count;
+}
+
+show_one(max_perf_pct, max_perf_pct);
+show_one(min_perf_pct, min_perf_pct);
+
+define_one_global_rw(status);
+define_one_global_rw(no_turbo);
+define_one_global_rw(max_perf_pct);
+define_one_global_rw(min_perf_pct);
+define_one_global_ro(turbo_pct);
+define_one_global_ro(num_pstates);
+define_one_global_rw(hwp_dynamic_boost);
+define_one_global_rw(energy_efficiency);
+
+static struct attribute *intel_pstate_attributes[] = {
+ &status.attr,
+ &no_turbo.attr,
+ NULL
+};
+
+static const struct attribute_group intel_pstate_attr_group = {
+ .attrs = intel_pstate_attributes,
+};
+
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
+
+static struct kobject *intel_pstate_kobject;
+
+static void __init intel_pstate_sysfs_expose_params(void)
+{
+ int rc;
+
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate",
+ &cpu_subsys.dev_root->kobj);
+ if (WARN_ON(!intel_pstate_kobject))
+ return;
+
+ rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
+ if (WARN_ON(rc))
+ return;
+
+ if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
+ WARN_ON(rc);
+
+ rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
+ WARN_ON(rc);
+ }
+
+ /*
+ * If per-CPU limits are enforced there are no global limits, so
+ * return without creating the max/min_perf_pct attributes.
+ */
+ if (per_cpu_limits)
+ return;
+
+ rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
+ WARN_ON(rc);
+
+ rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
+ WARN_ON(rc);
+
+ if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
+ rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
+ WARN_ON(rc);
+ }
+}
+
+static void __init intel_pstate_sysfs_remove(void)
+{
+ if (!intel_pstate_kobject)
+ return;
+
+ sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
+
+ if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
+ sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
+ }
+
+ if (!per_cpu_limits) {
+ sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
+ sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
+
+ if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
+ sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
+ }
+
+ kobject_put(intel_pstate_kobject);
+}
+
+static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
+{
+ int rc;
+
+ if (!hwp_active)
+ return;
+
+ rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+ WARN_ON_ONCE(rc);
+}
+
+static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
+{
+ if (!hwp_active)
+ return;
+
+ sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
+}
+
+/************************** sysfs end ************************/
+
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+ struct cpudata *cpudata =
+ container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
+
+ if (policy) {
+ intel_pstate_get_hwp_cap(cpudata);
+ __intel_pstate_update_max_freq(cpudata, policy);
+
+ cpufreq_cpu_release(policy);
+ }
+
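+ /* Acknowledge the notification by clearing MSR_HWP_STATUS. */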
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+}
+
+static DEFINE_SPINLOCK(hwp_notify_lock);
+static cpumask_t hwp_intr_enable_mask;
+
+void notify_hwp_interrupt(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ struct cpudata *cpudata;
+ unsigned long flags;
+ u64 value;
+
+ if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ return;
+
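+ /* Bit 0 of MSR_HWP_STATUS indicates a guaranteed performance change. */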
+ rdmsrl_safe(MSR_HWP_STATUS, &value);
+ if (!(value & 0x01))
+ return;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+
+ if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+ goto ack_intr;
+
+ /*
+ * Currently all_cpu_data is never freed and this code cannot be
+ * reached without it being allocated, but keep the check for
+ * safety against future changes.
+ */
+ if (unlikely(!READ_ONCE(all_cpu_data)))
+ goto ack_intr;
+
+ /*
+ * The per-CPU data is only freed during cleanup, when cpufreq
+ * registration fails, and this code is not reachable in that case,
+ * but keep the check for safety against future changes.
+ */
+ cpudata = READ_ONCE(all_cpu_data[this_cpu]);
+ if (unlikely(!cpudata))
+ goto ack_intr;
+
+ schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ return;
+
+ack_intr:
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+{
+ unsigned long flags;
+
+ if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ return;
+
+ /* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+ cancel_delayed_work(&cpudata->hwp_notify_work);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+ /* Enable HWP notification interrupt for guaranteed performance change */
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+ cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ /* wrmsrl_on_cpu() has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ }
+}
+
+static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
+{
+ cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+ /*
+ * If this CPU generation doesn't require a change to the
+ * balance_perf EPP, return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
+ * If the power-up EPP differs from the chipset default of 0x80 and
+ * - is more performance oriented than 0x80 (the default balance_perf EPP)
+ * - but less performance oriented than the performance EPP,
+ * then use it as the new balance_perf EPP.
+ */
+ if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE &&
+ cpudata->epp_default > HWP_EPP_PERFORMANCE) {
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default;
+ return;
+ }
+
+ /*
+ * Otherwise, use the per-generation hard-coded value to update
+ * the balance_perf and default EPP.
+ */
+ cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE];
+ intel_pstate_set_epp(cpudata, cpudata->epp_default);
+}
+
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
+{
+ /* First disable the HWP notification interrupt until it is re-enabled below */
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+ wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+
+ intel_pstate_enable_hwp_interrupt(cpudata);
+
+ if (cpudata->epp_default >= 0)
+ return;
+
+ intel_pstate_update_epp_defaults(cpudata);
+}
+
+static int atom_get_min_pstate(int not_used)
+{
+ u64 value;
+
+ rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ return (value >> 8) & 0x7F;
+}
+
+static int atom_get_max_pstate(int not_used)
+{
+ u64 value;
+
+ rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ return (value >> 16) & 0x7F;
+}
+
+static int atom_get_turbo_pstate(int not_used)
+{
+ u64 value;
+
+ rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ return value & 0x7F;
+}
+
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+ int32_t vid_fp;
+ u32 vid;
+
+ val = (u64)pstate << 8;
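+ /* Bit 32 of MSR_IA32_PERF_CTL disengages turbo (IDA disable) when set. */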
+ if (global.no_turbo && !global.turbo_disabled)
+ val |= (u64)1 << 32;
+
+ vid_fp = cpudata->vid.min + mul_fp(
+ int_tofp(pstate - cpudata->pstate.min_pstate),
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+ vid = ceiling_fp(vid_fp);
+
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+
+ return val | vid;
+}
+
+static int silvermont_get_scaling(void)
+{
+ u64 value;
+ int i;
+ /* Defined in Table 35-6 from SDM (Sept 2015) */
+ static int silvermont_freq_table[] = {
+ 83300, 100000, 133300, 116700, 80000};
+
+ rdmsrl(MSR_FSB_FREQ, value);
+ i = value & 0x7;
+ WARN_ON(i > 4);
+
+ return silvermont_freq_table[i];
+}
+
+static int airmont_get_scaling(void)
+{
+ u64 value;
+ int i;
+ /* Defined in Table 35-10 from SDM (Sept 2015) */
+ static int airmont_freq_table[] = {
+ 83300, 100000, 133300, 116700, 80000,
+ 93300, 90000, 88900, 87500};
+
+ rdmsrl(MSR_FSB_FREQ, value);
+ i = value & 0xF;
+ WARN_ON(i > 8);
+
+ return airmont_freq_table[i];
+}
+
+static void atom_get_vid(struct cpudata *cpudata)
+{
+ u64 value;
+
+ rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+ cpudata->vid.ratio = div_fp(
+ cpudata->vid.max - cpudata->vid.min,
+ int_tofp(cpudata->pstate.max_pstate -
+ cpudata->pstate.min_pstate));
+
+ rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ cpudata->vid.turbo = value & 0x7f;
+}
+
+static int core_get_min_pstate(int cpu)
+{
+ u64 value;
+
+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
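+ /* Bits 47:40 of MSR_PLATFORM_INFO hold the maximum efficiency ratio. */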
+ return (value >> 40) & 0xFF;
+}
+
+static int core_get_max_pstate_physical(int cpu)
+{
+ u64 value;
+
+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
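+ /* Bits 15:8 of MSR_PLATFORM_INFO hold the maximum non-turbo ratio. */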
+ return (value >> 8) & 0xFF;
+}
+
+static int core_get_tdp_ratio(int cpu, u64 plat_info)
+{
+ /* Check how many TDP levels are present */
+ if (plat_info & 0x600000000) {
+ u64 tdp_ctrl;
+ u64 tdp_ratio;
+ int tdp_msr;
+ int err;
+
+ /* Get the TDP level (0, 1, 2) to get ratios */
+ err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ if (err)
+ return err;
+
+ /* The TDP MSRs are contiguous, starting at 0x648 */
+ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
+ err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ if (err)
+ return err;
+
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl & 0x03)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
+
+ return (int)tdp_ratio;
+ }
+
+ return -ENXIO;
+}
+
+static int core_get_max_pstate(int cpu)
+{
+ u64 tar;
+ u64 plat_info;
+ int max_pstate;
+ int tdp_ratio;
+ int err;
+
+ rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ max_pstate = (plat_info >> 8) & 0xFF;
+
+ tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
+ if (tdp_ratio <= 0)
+ return max_pstate;
+
+ if (hwp_active) {
+ /* Turbo activation ratio is not used on HWP platforms */
+ return tdp_ratio;
+ }
+
+ err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ if (!err) {
+ int tar_levels;
+
+ /* Do some sanity checking for safety */
+ tar_levels = tar & 0xff;
+ if (tdp_ratio - 1 == tar_levels) {
+ max_pstate = tar_levels;
+ pr_debug("max_pstate=TAC %x\n", max_pstate);
+ }
+ }
+
+ return max_pstate;
+}
+
+static int core_get_turbo_pstate(int cpu)
+{
+ u64 value;
+ int nont, ret;
+
+ rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ nont = core_get_max_pstate(cpu);
+ ret = (value) & 255;
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static inline int core_get_scaling(void)
+{
+ return 100000;
+}
+
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+
+ val = (u64)pstate << 8;
+ if (global.no_turbo && !global.turbo_disabled)
+ val |= (u64)1 << 32;
+
+ return val;
+}
+
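+/*
+ * Knights Landing requires the MPERF reading to be scaled up:
+ * get_target_pstate() shifts the sampled MPERF left by this many bits
+ * before comparing it with the TSC.
+ */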
+static int knl_get_aperf_mperf_shift(void)
+{
+ return 10;
+}
+
+static int knl_get_turbo_pstate(int cpu)
+{
+ u64 value;
+ int nont, ret;
+
+ rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ nont = core_get_max_pstate(cpu);
+ ret = (((value) >> 8) & 0xFF);
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static void hybrid_get_type(void *data)
+{
+ u8 *cpu_type = data;
+
+ *cpu_type = get_this_hybrid_cpu_type();
+}
+
+static int hybrid_get_cpu_scaling(int cpu)
+{
+ u8 cpu_type = 0;
+
+ smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+ /* P-cores have a smaller perf level-to-frequency scaling factor. */
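+ /* get_this_hybrid_cpu_type() returns 0x40 for Intel Core (P-core) CPUs. */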
+ if (cpu_type == 0x40)
+ return 78741;
+
+ return core_get_scaling();
+}
+
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+{
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ cpu->pstate.current_pstate = pstate;
+ /*
+ * Generally, there is no guarantee that this code will always run on
+ * the CPU being updated, so force the register update to run on the
+ * right CPU.
+ */
+ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, pstate));
+}
+
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
+ update_turbo_state();
+ intel_pstate_set_pstate(cpu, pstate);
+}
+
+static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+{
+ int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
+ int perf_ctl_scaling = pstate_funcs.get_scaling();
+
+ cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
+ cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
+ cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ __intel_pstate_get_hwp_cap(cpu);
+
+ if (pstate_funcs.get_cpu_scaling) {
+ cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
+ if (cpu->pstate.scaling != perf_ctl_scaling)
+ intel_pstate_hybrid_hwp_adjust(cpu);
+ } else {
+ cpu->pstate.scaling = perf_ctl_scaling;
+ }
+ } else {
+ cpu->pstate.scaling = perf_ctl_scaling;
+ cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
+ }
+
+ if (cpu->pstate.scaling == perf_ctl_scaling) {
+ cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling;
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling;
+ }
+
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+
+ intel_pstate_set_min_pstate(cpu);
+}
+
+/*
+ * A long hold time keeps the high performance limits in place for a
+ * long time, which negatively impacts perf/watt for some workloads,
+ * like SPECpower. The 3 ms value is based on experiments with some
+ * workloads.
+ */
+static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
+
+static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
+{
+ u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
+ u32 max_limit = (hwp_req & 0xff00) >> 8;
+ u32 min_limit = (hwp_req & 0xff);
+ u32 boost_level1;
+
+ /*
+ * Cases to consider (User changes via sysfs or boot time):
+ * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
+ * No boost, return.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
+ * Should result in one level boost only for P0.
+ * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
+ * Should result in two level boost:
+ * (min + p1)/2 and P1.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
+ * Should result in three level boost:
+ * (min + p1)/2, P1 and P0.
+ */
+
+ /* If max and min are equal or already at max, nothing to boost */
+ if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
+ return;
+
+ if (!cpu->hwp_boost_min)
+ cpu->hwp_boost_min = min_limit;
+
+ /* level at the halfway mark between min and guaranteed */
+ boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
+
+ if (cpu->hwp_boost_min < boost_level1)
+ cpu->hwp_boost_min = boost_level1;
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
+ max_limit != HWP_GUARANTEED_PERF(hwp_cap))
+ cpu->hwp_boost_min = max_limit;
+ else
+ return;
+
+ hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
+ wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
+{
+ if (cpu->hwp_boost_min) {
+ bool expired;
+
+ /* Check if the CPU has been idle for the hold time before boosting down */
+ expired = time_after64(cpu->sample.time, cpu->last_update +
+ hwp_boost_hold_time_ns);
+ if (expired) {
+ wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ cpu->hwp_boost_min = 0;
+ }
+ }
+ cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
+ u64 time)
+{
+ cpu->sample.time = time;
+
+ if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
+ bool do_io = false;
+
+ cpu->sched_flags = 0;
+ /*
+ * Set the iowait_boost flag and update the time. Since the IOWAIT
+ * flag is set all the time, a single occurrence is not enough to
+ * conclude that some IO-bound activity is scheduled on this CPU.
+ * Only if at least two occurrences arrive in two consecutive ticks
+ * is the CPU treated as a boost candidate.
+ */
+ if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
+ do_io = true;
+
+ cpu->last_io_update = time;
+
+ if (do_io)
+ intel_pstate_hwp_boost_up(cpu);
+
+ } else {
+ intel_pstate_hwp_boost_down(cpu);
+ }
+}
+
+static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
+ u64 time, unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+
+ cpu->sched_flags |= flags;
+
+ if (smp_processor_id() == cpu->cpu)
+ intel_pstate_update_util_hwp_local(cpu, time);
+}
+
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
+{
+ struct sample *sample = &cpu->sample;
+
+ sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
+}
+
+static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
+{
+ u64 aperf, mperf;
+ unsigned long flags;
+ u64 tsc;
+
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = rdtsc();
+ if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
+ local_irq_restore(flags);
+ return false;
+ }
+ local_irq_restore(flags);
+
+ cpu->last_sample_time = cpu->sample.time;
+ cpu->sample.time = time;
+ cpu->sample.aperf = aperf;
+ cpu->sample.mperf = mperf;
+ cpu->sample.tsc = tsc;
+ cpu->sample.aperf -= cpu->prev_aperf;
+ cpu->sample.mperf -= cpu->prev_mperf;
+ cpu->sample.tsc -= cpu->prev_tsc;
+
+ cpu->prev_aperf = aperf;
+ cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
+ /*
+ * First time this function is invoked in a given cycle, all of the
+ * previous sample data fields are equal to zero or stale and they must
+ * be populated with meaningful numbers for things to work, so assume
+ * that sample.time will always be reset before setting the utilization
+ * update hook and make the caller skip the sample then.
+ */
+ if (cpu->last_sample_time) {
+ intel_pstate_calc_avg_perf(cpu);
+ return true;
+ }
+ return false;
+}
+
+static inline int32_t get_avg_frequency(struct cpudata *cpu)
+{
+ return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+ return mul_ext_fp(cpu->pstate.max_pstate_physical,
+ cpu->sample.core_avg_perf);
+}
+
+static inline int32_t get_target_pstate(struct cpudata *cpu)
+{
+ struct sample *sample = &cpu->sample;
+ int32_t busy_frac;
+ int target, avg_pstate;
+
+ busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+ sample->tsc);
+
+ if (busy_frac < cpu->iowait_boost)
+ busy_frac = cpu->iowait_boost;
+
+ sample->busy_scaled = busy_frac * 100;
+
+ target = global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
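+ /*
+ * Add 25% headroom and scale by the busy fraction, i.e.
+ * target = 1.25 * busy_frac * max available P-state.
+ */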
+ target += target >> 2;
+ target = mul_fp(target, busy_frac);
+ if (target < cpu->pstate.min_pstate)
+ target = cpu->pstate.min_pstate;
+
+ /*
+ * If the average P-state during the previous cycle was higher than the
+ * current target, add 50% of the difference to the target to reduce
+ * possible performance oscillations and offset possible performance
+ * loss related to moving the workload from one CPU to another within
+ * a package/module.
+ */
+ avg_pstate = get_avg_pstate(cpu);
+ if (avg_pstate > target)
+ target += (avg_pstate - target) >> 1;
+
+ return target;
+}
+
+static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
+{
+ int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+ int max_pstate = max(min_pstate, cpu->max_perf_ratio);
+
+ return clamp_t(int, pstate, min_pstate, max_pstate);
+}
+
+static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+ cpu->pstate.current_pstate = pstate;
+ wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
+static void intel_pstate_adjust_pstate(struct cpudata *cpu)
+{
+ int from = cpu->pstate.current_pstate;
+ struct sample *sample;
+ int target_pstate;
+
+ update_turbo_state();
+
+ target_pstate = get_target_pstate(cpu);
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
+ intel_pstate_update_pstate(cpu, target_pstate);
+
+ sample = &cpu->sample;
+ trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
+ fp_toint(sample->busy_scaled),
+ from,
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->tsc,
+ get_avg_frequency(cpu),
+ fp_toint(cpu->iowait_boost * 100));
+}
+
+static void intel_pstate_update_util(struct update_util_data *data, u64 time,
+ unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+ u64 delta_ns;
+
+ /* Don't allow remote callbacks */
+ if (smp_processor_id() != cpu->cpu)
+ return;
+
+ delta_ns = time - cpu->last_update;
+ if (flags & SCHED_CPUFREQ_IOWAIT) {
+ /* Start over if the CPU may have been idle. */
+ if (delta_ns > TICK_NSEC) {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
+ cpu->iowait_boost <<= 1;
+ if (cpu->iowait_boost > int_tofp(1))
+ cpu->iowait_boost = int_tofp(1);
+ } else {
+ cpu->iowait_boost = ONE_EIGHTH_FP;
+ }
+ } else if (cpu->iowait_boost) {
+ /* Clear iowait_boost if the CPU may have been idle. */
+ if (delta_ns > TICK_NSEC)
+ cpu->iowait_boost = 0;
+ else
+ cpu->iowait_boost >>= 1;
+ }
+ cpu->last_update = time;
+ delta_ns = time - cpu->sample.time;
+ if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
+ return;
+
+ if (intel_pstate_sample(cpu, time))
+ intel_pstate_adjust_pstate(cpu);
+}
+
+static struct pstate_funcs core_funcs = {
+ .get_max = core_get_max_pstate,
+ .get_max_physical = core_get_max_pstate_physical,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
+ .get_val = core_get_val,
+};
+
+static const struct pstate_funcs silvermont_funcs = {
+ .get_max = atom_get_max_pstate,
+ .get_max_physical = atom_get_max_pstate,
+ .get_min = atom_get_min_pstate,
+ .get_turbo = atom_get_turbo_pstate,
+ .get_val = atom_get_val,
+ .get_scaling = silvermont_get_scaling,
+ .get_vid = atom_get_vid,
+};
+
+static const struct pstate_funcs airmont_funcs = {
+ .get_max = atom_get_max_pstate,
+ .get_max_physical = atom_get_max_pstate,
+ .get_min = atom_get_min_pstate,
+ .get_turbo = atom_get_turbo_pstate,
+ .get_val = atom_get_val,
+ .get_scaling = airmont_get_scaling,
+ .get_vid = atom_get_vid,
+};
+
+static const struct pstate_funcs knl_funcs = {
+ .get_max = core_get_max_pstate,
+ .get_max_physical = core_get_max_pstate_physical,
+ .get_min = core_get_min_pstate,
+ .get_turbo = knl_get_turbo_pstate,
+ .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
+ .get_scaling = core_get_scaling,
+ .get_val = core_get_val,
+};
+
+#define X86_MATCH(model, policy) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ X86_FEATURE_APERFMPERF, &policy)
+
+static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ X86_MATCH(SANDYBRIDGE, core_funcs),
+ X86_MATCH(SANDYBRIDGE_X, core_funcs),
+ X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
+ X86_MATCH(IVYBRIDGE, core_funcs),
+ X86_MATCH(HASWELL, core_funcs),
+ X86_MATCH(BROADWELL, core_funcs),
+ X86_MATCH(IVYBRIDGE_X, core_funcs),
+ X86_MATCH(HASWELL_X, core_funcs),
+ X86_MATCH(HASWELL_L, core_funcs),
+ X86_MATCH(HASWELL_G, core_funcs),
+ X86_MATCH(BROADWELL_G, core_funcs),
+ X86_MATCH(ATOM_AIRMONT, airmont_funcs),
+ X86_MATCH(SKYLAKE_L, core_funcs),
+ X86_MATCH(BROADWELL_X, core_funcs),
+ X86_MATCH(SKYLAKE, core_funcs),
+ X86_MATCH(BROADWELL_D, core_funcs),
+ X86_MATCH(XEON_PHI_KNL, knl_funcs),
+ X86_MATCH(XEON_PHI_KNM, knl_funcs),
+ X86_MATCH(ATOM_GOLDMONT, core_funcs),
+ X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(COMETLAKE, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
+ X86_MATCH(TIGERLAKE, core_funcs),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+ X86_MATCH(BROADWELL_D, core_funcs),
+ X86_MATCH(BROADWELL_X, core_funcs),
+ X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
+ X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ {}
+};
+
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+ X86_MATCH(KABYLAKE, core_funcs),
+ {}
+};
+
+static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
+ X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(SKYLAKE, core_funcs),
+ {}
+};
+
+static int intel_pstate_init_cpu(unsigned int cpunum)
+{
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[cpunum];
+
+ if (!cpu) {
+ cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ WRITE_ONCE(all_cpu_data[cpunum], cpu);
+
+ cpu->cpu = cpunum;
+
+ cpu->epp_default = -EINVAL;
+
+ if (hwp_active) {
+ const struct x86_cpu_id *id;
+
+ intel_pstate_hwp_enable(cpu);
+
+ id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+ if (id && intel_pstate_acpi_pm_profile_server())
+ hwp_boost = true;
+ }
+ } else if (hwp_active) {
+ /*
+ * Re-enable HWP in case this happens after a resume from ACPI
+ * S3 if the CPU was offline during the whole system suspend/resume
+ * cycle.
+ */
+ intel_pstate_hwp_reenable(cpu);
+ }
+
+ cpu->epp_powersave = -EINVAL;
+ cpu->epp_policy = 0;
+
+ intel_pstate_get_cpu_pstates(cpu);
+
+ pr_debug("controlling: cpu %d\n", cpunum);
+
+ return 0;
+}
+
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+ struct cpudata *cpu = all_cpu_data[cpu_num];
+
+ if (hwp_active && !hwp_boost)
+ return;
+
+ if (cpu->update_util_set)
+ return;
+
+ /* Prevent intel_pstate_update_util() from using stale data. */
+ cpu->sample.time = 0;
+ cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+ (hwp_active ?
+ intel_pstate_update_util_hwp :
+ intel_pstate_update_util));
+ cpu->update_util_set = true;
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+
+ if (!cpu_data->update_util_set)
+ return;
+
+ cpufreq_remove_update_util_hook(cpu);
+ cpu_data->update_util_set = false;
+ synchronize_rcu();
+}
+
+static int intel_pstate_get_max_freq(struct cpudata *cpu)
+{
+ return global.turbo_disabled || global.no_turbo ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+}
+
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ unsigned int policy_min,
+ unsigned int policy_max)
+{
+ int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
+ int32_t max_policy_perf, min_policy_perf;
+
+ max_policy_perf = policy_max / perf_ctl_scaling;
+ if (policy_max == policy_min) {
+ min_policy_perf = max_policy_perf;
+ } else {
+ min_policy_perf = policy_min / perf_ctl_scaling;
+ min_policy_perf = clamp_t(int32_t, min_policy_perf,
+ 0, max_policy_perf);
+ }
+
+ /*
+ * HWP needs some special consideration, because HWP_REQUEST uses
+ * abstract values to represent performance rather than pure ratios.
+ */
+ if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
+ int freq;
+
+ freq = max_policy_perf * perf_ctl_scaling;
+ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ freq = min_policy_perf * perf_ctl_scaling;
+ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
+ }
+
+ pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
+ cpu->cpu, min_policy_perf, max_policy_perf);
+
+ /* Normalize user input to [min_perf, max_perf] */
+ if (per_cpu_limits) {
+ cpu->min_perf_ratio = min_policy_perf;
+ cpu->max_perf_ratio = max_policy_perf;
+ } else {
+ int turbo_max = cpu->pstate.turbo_pstate;
+ int32_t global_min, global_max;
+
+ /* Global limits are in percent of the maximum turbo P-state. */
+ global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+ global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
+ global_min = clamp_t(int32_t, global_min, 0, global_max);
+
+ pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
+ global_min, global_max);
+
+ cpu->min_perf_ratio = max(min_policy_perf, global_min);
+ cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
+ cpu->max_perf_ratio = min(max_policy_perf, global_max);
+ cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
+
+ /* Make sure min_perf <= max_perf */
+ cpu->min_perf_ratio = min(cpu->min_perf_ratio,
+ cpu->max_perf_ratio);
+
+ }
+ pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
+ cpu->max_perf_ratio,
+ cpu->min_perf_ratio);
+}
+
+static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
+
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ } else {
+ intel_pstate_set_update_util_hook(policy->cpu);
+ }
+
+ if (hwp_active) {
+ /*
+ * If hwp_boost was active before and has been dynamically
+ * turned off, the update_util hook needs to be cleared.
+ */
+ if (!hwp_boost)
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_hwp_set(policy->cpu);
+ }
+ /*
+ * policy->cur is never updated with the intel_pstate driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
+
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return 0;
+}
+
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
+{
+ if (!hwp_active &&
+ cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ policy->max < policy->cpuinfo.max_freq &&
+ policy->max > cpu->pstate.max_freq) {
+ pr_debug("policy->max > max non turbo frequency\n");
+ policy->max = policy->cpuinfo.max_freq;
+ }
+}
+
+static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
+{
+ int max_freq;
+
+ update_turbo_state();
+ if (hwp_active) {
+ intel_pstate_get_hwp_cap(cpu);
+ max_freq = global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+ } else {
+ max_freq = intel_pstate_get_max_freq(cpu);
+ }
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
+
+ intel_pstate_adjust_policy_max(cpu, policy);
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
+{
+ intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
+
+ return 0;
+}
+
+static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going offline\n", cpu->cpu);
+
+ if (cpu->suspended)
+ return 0;
+
+ /*
+ * If the CPU is an SMT thread and it goes offline with the performance
+ * settings different from the minimum, it will prevent its sibling
+ * from getting to lower performance levels, so force the minimum
+ * performance on CPU offline to prevent that from happening.
+ */
+ if (hwp_active)
+ intel_pstate_hwp_offline(cpu);
+ else
+ intel_pstate_set_min_pstate(cpu);
+
+ intel_pstate_exit_perf_limits(policy);
+
+ return 0;
+}
+
+static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going online\n", cpu->cpu);
+
+ intel_pstate_init_acpi_perf_limits(policy);
+
+ if (hwp_active) {
+ /*
+ * Re-enable HWP and clear the "suspended" flag to let "resume"
+ * know that it need not do that.
+ */
+ intel_pstate_hwp_reenable(cpu);
+ cpu->suspended = false;
+ }
+
+ return 0;
+}
+
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
+{
+ intel_pstate_clear_update_util_hook(policy->cpu);
+
+ return intel_cpufreq_cpu_offline(policy);
+}
+
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ pr_debug("CPU %d exiting\n", policy->cpu);
+
+ policy->fast_switch_possible = false;
+
+ return 0;
+}
+
+static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int rc;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+ return rc;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ cpu->max_perf_ratio = 0xFF;
+ cpu->min_perf_ratio = 0;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = cpu->pstate.min_freq;
+ update_turbo_state();
+ global.turbo_disabled_mf = global.turbo_disabled;
+ policy->cpuinfo.max_freq = global.turbo_disabled ?
+ cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ intel_pstate_init_acpi_perf_limits(policy);
+
+ policy->fast_switch_possible = true;
+
+ return 0;
+}
+
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int ret = __intel_pstate_cpu_init(policy);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ if (hwp_active) {
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
+ }
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_pstate = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_pstate_verify_policy,
+ .setpolicy = intel_pstate_set_policy,
+ .suspend = intel_pstate_suspend,
+ .resume = intel_pstate_resume,
+ .init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .offline = intel_pstate_cpu_offline,
+ .online = intel_pstate_cpu_online,
+ .update_limits = intel_pstate_update_limits,
+ .name = "intel_pstate",
+};
+
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ intel_pstate_verify_cpu_policy(cpu, policy);
+ intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
+
+ return 0;
+}
+
+/* Use of trace in passive mode:
+ *
+ * In passive mode the trace core_busy field (also known as the
+ * performance field, and labelled as such on the graphs; also known as
+ * core_avg_perf) is not needed and so is re-assigned to indicate if the
+ * driver call was via the normal or fast switch path. Various graphs
+ * output from the intel_pstate_tracer.py utility that include core_busy
+ * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
+ * so we use 10 to indicate the normal path through the driver, and
+ * 90 to indicate the fast switch path through the driver.
+ * The scaled_busy field is not used, and is set to 0.
+ */
+
+#define INTEL_PSTATE_TRACE_TARGET 10
+#define INTEL_PSTATE_TRACE_FAST_SWITCH 90
+
+static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
+{
+ struct sample *sample;
+
+ if (!trace_pstate_sample_enabled())
+ return;
+
+ if (!intel_pstate_sample(cpu, ktime_get()))
+ return;
+
+ sample = &cpu->sample;
+ trace_pstate_sample(trace_type,
+ 0,
+ old_pstate,
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->tsc,
+ get_avg_frequency(cpu),
+ fp_toint(cpu->iowait_boost * 100));
+}
+
+static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
+ u32 desired, bool fast_switch)
+{
+ u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(min);
+
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(max);
+
+ value &= ~HWP_DESIRED_PERF(~0L);
+ value |= HWP_DESIRED_PERF(desired);
+
+ if (value == prev)
+ return;
+
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ if (fast_switch)
+ wrmsrl(MSR_HWP_REQUEST, value);
+ else
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+}
+
+static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
+ u32 target_pstate, bool fast_switch)
+{
+ if (fast_switch)
+ wrmsrl(MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+ else
+ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, target_pstate));
+}
+
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+ int target_pstate, bool fast_switch)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int old_pstate = cpu->pstate.current_pstate;
+
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
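+ /*
+ * With HWP, request at least the target P-state and, unless the policy
+ * requires a strict target, let the hardware pick anything up to
+ * max_perf_ratio. Without HWP, write the target P-state directly if it
+ * has changed.
+ */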
+ if (hwp_active) {
+ int max_pstate = policy->strict_target ?
+ target_pstate : cpu->max_perf_ratio;
+
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
+ fast_switch);
+ } else if (target_pstate != old_pstate) {
+ intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
+ }
+
+ cpu->pstate.current_pstate = target_pstate;
+
+ intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
+ INTEL_PSTATE_TRACE_TARGET, old_pstate);
+
+ return target_pstate;
+}
+
+static int intel_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ struct cpufreq_freqs freqs;
+ int target_pstate;
+
+ update_turbo_state();
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+
+ freqs.new = target_pstate * cpu->pstate.scaling;
+
+ cpufreq_freq_transition_end(policy, &freqs, false);
+
+ return 0;
+}
+
+static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ int target_pstate;
+
+ update_turbo_state();
+
+ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
+
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+
+ return target_pstate * cpu->pstate.scaling;
+}
+
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
+ int old_pstate = cpu->pstate.current_pstate;
+ int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+ update_turbo_state();
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
+
+ /* Optimization: Avoid unnecessary divisions. */
+
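+ /*
+ * min_perf, target_perf and capacity are all in the same abstract units,
+ * so scale them to P-states relative to the capping P-state computed
+ * above.
+ */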
+ target_pstate = cap_pstate;
+ if (target_perf < capacity)
+ target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+ min_pstate = cap_pstate;
+ if (min_perf < capacity)
+ min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+ if (min_pstate < cpu->pstate.min_pstate)
+ min_pstate = cpu->pstate.min_pstate;
+
+ if (min_pstate < cpu->min_perf_ratio)
+ min_pstate = cpu->min_perf_ratio;
+
+ max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+ if (max_pstate < min_pstate)
+ max_pstate = min_pstate;
+
+ target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+ intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
+
+ cpu->pstate.current_pstate = target_pstate;
+ intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
+static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct freq_qos_request *req;
+ struct cpudata *cpu;
+ struct device *dev;
+ int ret, freq;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ ret = __intel_pstate_cpu_init(policy);
+ if (ret)
+ return ret;
+
+ policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+ /* This reflects the intel_pstate_get_cpu_pstates() setting. */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ req = kcalloc(2, sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto pstate_exit;
+ }
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (hwp_active) {
+ u64 value;
+
+ policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+
+ intel_pstate_get_hwp_cap(cpu);
+
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+
+ cpu->epp_cached = intel_pstate_get_epp(cpu, value);
+ } else {
+ policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
+ }
+
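+	/*
+	 * Add min/max frequency QoS requests derived from the global
+	 * min_perf_pct/max_perf_pct limits, relative to the turbo frequency.
+	 */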
+ freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
+
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+ freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
+ goto free_req;
+ }
+
+ freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
+
+ ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+ freq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
+ goto remove_min_req;
+ }
+
+ policy->driver_data = req;
+
+ return 0;
+
+remove_min_req:
+ freq_qos_remove_request(req);
+free_req:
+ kfree(req);
+pstate_exit:
+ intel_pstate_exit_perf_limits(policy);
+
+ return ret;
+}
+
+static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct freq_qos_request *req;
+
+ req = policy->driver_data;
+
+ freq_qos_remove_request(req + 1);
+ freq_qos_remove_request(req);
+ kfree(req);
+
+ return intel_pstate_cpu_exit(policy);
+}
+
+static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ intel_pstate_suspend(policy);
+
+ if (hwp_active) {
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+ /*
+		 * Clear the desired perf field in MSR_HWP_REQUEST, because
+		 * intel_cpufreq_adjust_perf() may be in use and the last value
+		 * written by it may not be suitable.
+ */
+ value &= ~HWP_DESIRED_PERF(~0L);
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ }
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_cpufreq = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_cpufreq_verify_policy,
+ .target = intel_cpufreq_target,
+ .fast_switch = intel_cpufreq_fast_switch,
+ .init = intel_cpufreq_cpu_init,
+ .exit = intel_cpufreq_cpu_exit,
+ .offline = intel_cpufreq_cpu_offline,
+ .online = intel_pstate_cpu_online,
+ .suspend = intel_cpufreq_suspend,
+ .resume = intel_pstate_resume,
+ .update_limits = intel_pstate_update_limits,
+ .name = "intel_cpufreq",
+};
+
+static struct cpufreq_driver *default_driver;
+
+static void intel_pstate_driver_cleanup(void)
+{
+ unsigned int cpu;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ if (intel_pstate_driver == &intel_pstate)
+ intel_pstate_clear_update_util_hook(cpu);
+
+ spin_lock(&hwp_notify_lock);
+ kfree(all_cpu_data[cpu]);
+ WRITE_ONCE(all_cpu_data[cpu], NULL);
+ spin_unlock(&hwp_notify_lock);
+ }
+ }
+ cpus_read_unlock();
+
+ intel_pstate_driver = NULL;
+}
+
+static int intel_pstate_register_driver(struct cpufreq_driver *driver)
+{
+ int ret;
+
+ if (driver == &intel_pstate)
+ intel_pstate_sysfs_expose_hwp_dynamic_boost();
+
+ memset(&global, 0, sizeof(global));
+ global.max_perf_pct = 100;
+
+ intel_pstate_driver = driver;
+ ret = cpufreq_register_driver(intel_pstate_driver);
+ if (ret) {
+ intel_pstate_driver_cleanup();
+ return ret;
+ }
+
+ global.min_perf_pct = min_perf_pct_min();
+
+ return 0;
+}
+
+static ssize_t intel_pstate_show_status(char *buf)
+{
+ if (!intel_pstate_driver)
+ return sprintf(buf, "off\n");
+
+ return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
+ "active" : "passive");
+}
+
+static int intel_pstate_update_status(const char *buf, size_t size)
+{
+ if (size == 3 && !strncmp(buf, "off", size)) {
+ if (!intel_pstate_driver)
+ return -EINVAL;
+
+ if (hwp_active)
+ return -EBUSY;
+
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_driver_cleanup();
+ return 0;
+ }
+
+ if (size == 6 && !strncmp(buf, "active", size)) {
+ if (intel_pstate_driver) {
+ if (intel_pstate_driver == &intel_pstate)
+ return 0;
+
+ cpufreq_unregister_driver(intel_pstate_driver);
+ }
+
+ return intel_pstate_register_driver(&intel_pstate);
+ }
+
+ if (size == 7 && !strncmp(buf, "passive", size)) {
+ if (intel_pstate_driver) {
+ if (intel_pstate_driver == &intel_cpufreq)
+ return 0;
+
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_sysfs_hide_hwp_dynamic_boost();
+ }
+
+ return intel_pstate_register_driver(&intel_cpufreq);
+ }
+
+ return -EINVAL;
+}
+
+static int no_load __initdata;
+static int no_hwp __initdata;
+static int hwp_only __initdata;
+static unsigned int force_load __initdata;
+
+static int __init intel_pstate_msrs_not_valid(void)
+{
+ if (!pstate_funcs.get_max(0) ||
+ !pstate_funcs.get_min(0) ||
+ !pstate_funcs.get_turbo(0))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_max_physical = funcs->get_max_physical;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
+ pstate_funcs.get_scaling = funcs->get_scaling;
+ pstate_funcs.get_val = funcs->get_val;
+ pstate_funcs.get_vid = funcs->get_vid;
+ pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
+}
+
+#ifdef CONFIG_ACPI
+
+static bool __init intel_pstate_no_acpi_pss(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ acpi_status status;
+ union acpi_object *pss;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+
+ status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ pss = buffer.pointer;
+ if (pss && pss->type == ACPI_TYPE_PACKAGE) {
+ kfree(pss);
+ return false;
+ }
+
+ kfree(pss);
+ }
+
+ pr_debug("ACPI _PSS not found\n");
+ return true;
+}
+
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ if (acpi_has_method(handle, "PCCH"))
+ return false;
+
+not_found:
+ pr_debug("ACPI PCCH not found\n");
+ return true;
+}
+
+static bool __init intel_pstate_has_acpi_ppc(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+ if (acpi_has_method(pr->handle, "_PPC"))
+ return true;
+ }
+ pr_debug("ACPI _PPC not found\n");
+ return false;
+}
+
+enum {
+ PSS,
+ PPC,
+};
+
+/* Hardware vendor-specific info that has its own power management modes */
+static struct acpi_platform_list plat_info[] __initdata = {
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ { } /* End */
+};
+
+#define BITMASK_OOB (BIT(8) | BIT(18))
+
+static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
+{
+ const struct x86_cpu_id *id;
+ u64 misc_pwr;
+ int idx;
+
+ id = x86_match_cpu(intel_pstate_cpu_oob_ids);
+ if (id) {
+ rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ if (misc_pwr & BITMASK_OOB) {
+ pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
+ pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
+ return true;
+ }
+ }
+
+ idx = acpi_match_platform_list(plat_info);
+ if (idx < 0)
+ return false;
+
+ switch (plat_info[idx].data) {
+ case PSS:
+ if (!intel_pstate_no_acpi_pss())
+ return false;
+
+ return intel_pstate_no_acpi_pcch();
+ case PPC:
+ return intel_pstate_has_acpi_ppc() && !force_load;
+ }
+
+ return false;
+}
+
+static void intel_pstate_request_control_from_smm(void)
+{
+ /*
+ * It may be unsafe to request P-states control from SMM if _PPC support
+ * has not been enabled.
+ */
+ if (acpi_ppc)
+ acpi_processor_pstate_control();
+}
+#else /* CONFIG_ACPI not enabled */
+static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+static inline void intel_pstate_request_control_from_smm(void) {}
+#endif /* CONFIG_ACPI */
+
+#define INTEL_PSTATE_HWP_BROADWELL 0x01
+
+#define X86_MATCH_HWP(model, hwp_mode) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ X86_FEATURE_HWP, hwp_mode)
+
+static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
+ X86_MATCH_HWP(ANY, 0),
+ {}
+};
+
+static bool intel_pstate_hwp_is_enabled(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PM_ENABLE, value);
+ return !!(value & 0x1);
+}
+
+static const struct x86_cpu_id intel_epp_balance_perf[] = {
+ /*
+	 * Set the EPP value to 102; this is the maximum suggested EPP
+	 * which can still result in one-core turbo frequency for
+	 * Alder Lake mobile CPUs.
+ */
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
+ {}
+};
+
+static int __init intel_pstate_init(void)
+{
+ static struct cpudata **_all_cpu_data;
+ const struct x86_cpu_id *id;
+ int rc;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
+ bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+ if (hwp_forced)
+ pr_info("HWP enabled by BIOS\n");
+ else if (no_load)
+ return -ENODEV;
+
+ copy_cpu_funcs(&core_funcs);
+ /*
+ * Avoid enabling HWP for processors without EPP support,
+		 * because that means an incomplete HWP implementation, which is
+		 * a corner case, and supporting it is generally problematic.
+ *
+ * If HWP is enabled already, though, there is no choice but to
+ * deal with it.
+ */
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+ WRITE_ONCE(hwp_active, 1);
+ hwp_mode_bdw = id->driver_data;
+ intel_pstate.attr = hwp_cpufreq_attrs;
+ intel_cpufreq.attr = hwp_cpufreq_attrs;
+ intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+ intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
+ if (!default_driver)
+ default_driver = &intel_pstate;
+
+ if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
+ pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
+
+ goto hwp_cpu_matched;
+ }
+ pr_info("HWP not enabled\n");
+ } else {
+ if (no_load)
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id) {
+ pr_info("CPU model not supported\n");
+ return -ENODEV;
+ }
+
+ copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
+ }
+
+ if (intel_pstate_msrs_not_valid()) {
+ pr_info("Invalid MSRs\n");
+ return -ENODEV;
+ }
+ /* Without HWP start in the passive mode. */
+ if (!default_driver)
+ default_driver = &intel_cpufreq;
+
+hwp_cpu_matched:
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
+ if (!hwp_active && hwp_only)
+ return -ENOTSUPP;
+
+ pr_info("Intel P-state driver initializing\n");
+
+ _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
+ if (!_all_cpu_data)
+ return -ENOMEM;
+
+ WRITE_ONCE(all_cpu_data, _all_cpu_data);
+
+ intel_pstate_request_control_from_smm();
+
+ intel_pstate_sysfs_expose_params();
+
+ if (hwp_active) {
+ const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+
+ if (id)
+ epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+ }
+
+ mutex_lock(&intel_pstate_driver_lock);
+ rc = intel_pstate_register_driver(default_driver);
+ mutex_unlock(&intel_pstate_driver_lock);
+ if (rc) {
+ intel_pstate_sysfs_remove();
+ return rc;
+ }
+
+ if (hwp_active) {
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+ if (id) {
+ set_power_ctl_ee_state(false);
+ pr_info("Disabling energy efficiency optimization\n");
+ }
+
+ pr_info("HWP enabled\n");
+ } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
+ pr_warn("Problematic setup: Hybrid processor with disabled HWP\n");
+ }
+
+ return 0;
+}
+device_initcall(intel_pstate_init);
+
+static int __init intel_pstate_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable"))
+ no_load = 1;
+ else if (!strcmp(str, "active"))
+ default_driver = &intel_pstate;
+ else if (!strcmp(str, "passive"))
+ default_driver = &intel_cpufreq;
+
+ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
+
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+ hwp_only = 1;
+ if (!strcmp(str, "per_cpu_perf_limits"))
+ per_cpu_limits = true;
+
+#ifdef CONFIG_ACPI
+ if (!strcmp(str, "support_acpi_ppc"))
+ acpi_ppc = true;
+#endif
+
+ return 0;
+}
+early_param("intel_pstate", intel_pstate_setup);
+
+MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
+MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000..70ad8fe1d
--- /dev/null
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#define CPU_SW_INT_BLK BIT(28)
+
+static struct priv
+{
+ struct clk *cpu_clk;
+ struct clk *ddr_clk;
+ struct clk *powersave_clk;
+ struct device *dev;
+ void __iomem *base;
+} priv;
+
+#define STATE_CPU_FREQ 0x01
+#define STATE_DDR_FREQ 0x02
+
+/*
+ * Kirkwood can swap the clock to the CPU between two clocks:
+ *
+ * - cpu clk
+ * - ddr clk
+ *
+ * The frequencies are set at runtime before registering this table.
+ */
+static struct cpufreq_frequency_table kirkwood_freq_table[] = {
+ {0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
+{
+ return clk_get_rate(priv.powersave_clk) / 1000;
+}
+
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int state = kirkwood_freq_table[index].driver_data;
+ unsigned long reg;
+
+ local_irq_disable();
+
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_set_parent(priv.powersave_clk, priv.cpu_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_set_parent(priv.powersave_clk, priv.ddr_clk);
+ break;
+ }
+
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
+
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ local_irq_enable();
+
+ return 0;
+}
+
+/* Module init and exit code */
+static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
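+	/* The third argument is the transition latency in nanoseconds (5 us). */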
+ cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
+ return 0;
+}
+
+static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .get = kirkwood_cpufreq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = kirkwood_cpufreq_target,
+ .init = kirkwood_cpufreq_cpu_init,
+ .name = "kirkwood-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ int err;
+
+ priv.dev = &pdev->dev;
+
+ priv.base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv.base))
+ return PTR_ERR(priv.base);
+
+ np = of_cpu_device_node_get(0);
+ if (!np) {
+ dev_err(&pdev->dev, "failed to get cpu device node\n");
+ return -ENODEV;
+ }
+
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk\n");
+ err = PTR_ERR(priv.cpu_clk);
+ goto out_node;
+ }
+
+ err = clk_prepare_enable(priv.cpu_clk);
+ if (err) {
+ dev_err(priv.dev, "Unable to prepare cpuclk\n");
+ goto out_node;
+ }
+
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+
+ priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
+ if (IS_ERR(priv.ddr_clk)) {
+ dev_err(priv.dev, "Unable to get ddrclk\n");
+ err = PTR_ERR(priv.ddr_clk);
+ goto out_cpu;
+ }
+
+ err = clk_prepare_enable(priv.ddr_clk);
+ if (err) {
+ dev_err(priv.dev, "Unable to prepare ddrclk\n");
+ goto out_cpu;
+ }
+ kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
+
+ priv.powersave_clk = of_clk_get_by_name(np, "powersave");
+ if (IS_ERR(priv.powersave_clk)) {
+ dev_err(priv.dev, "Unable to get powersave\n");
+ err = PTR_ERR(priv.powersave_clk);
+ goto out_ddr;
+ }
+ err = clk_prepare_enable(priv.powersave_clk);
+ if (err) {
+ dev_err(priv.dev, "Unable to prepare powersave clk\n");
+ goto out_ddr;
+ }
+
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+ if (err) {
+ dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ goto out_powersave;
+ }
+
+ of_node_put(np);
+ return 0;
+
+out_powersave:
+ clk_disable_unprepare(priv.powersave_clk);
+out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
+out_node:
+ of_node_put(np);
+
+ return err;
+}
+
+static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
+
+ clk_disable_unprepare(priv.powersave_clk);
+ clk_disable_unprepare(priv.ddr_clk);
+ clk_disable_unprepare(priv.cpu_clk);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpufreq_platform_driver = {
+ .probe = kirkwood_cpufreq_probe,
+ .remove = kirkwood_cpufreq_remove,
+ .driver = {
+ .name = "kirkwood-cpufreq",
+ },
+};
+
+module_platform_driver(kirkwood_cpufreq_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
+MODULE_ALIAS("platform:kirkwood-cpufreq");
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
new file mode 100644
index 000000000..3e000e1a7
--- /dev/null
+++ b/drivers/cpufreq/longhaul.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2001-2004 Dave Jones.
+ * (C) 2002 Padraig Brady. <padraig@antefacto.com>
+ *
+ * Based upon datasheets & sample CPUs kindly provided by VIA.
+ *
+ * VIA currently has 3 different versions of Longhaul.
+ * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
+ * It is present only in Samuel 1 (C5A) and Samuel 2 (C5B) stepping 0.
+ * Version 2 of Longhaul is backward compatible with v1, but adds the
+ * LONGHAUL MSR for both frequency and voltage scaling.
+ * It is present in Samuel 2 (steppings 1-7 only) (C5B) and Ezra (C5C).
+ * Version 3 of Longhaul was renamed to Powersaver and redesigned
+ * to use only the POWERSAVER MSR at 0x110a.
+ * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
+ * Feature-wise it is much the same as Longhaul v2; there is provision
+ * for scaling the FSB too, but that doesn't work well in practice,
+ * so we don't even try to use it.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+#include <acpi/processor.h>
+
+#include "longhaul.h"
+
+#define TYPE_LONGHAUL_V1 1
+#define TYPE_LONGHAUL_V2 2
+#define TYPE_POWERSAVER 3
+
+#define CPU_SAMUEL 1
+#define CPU_SAMUEL2 2
+#define CPU_EZRA 3
+#define CPU_EZRA_T 4
+#define CPU_NEHEMIAH 5
+#define CPU_NEHEMIAH_C 6
+
+/* Flags */
+#define USE_ACPI_C3 (1 << 1)
+#define USE_NORTHBRIDGE (1 << 2)
+
+static int cpu_model;
+static unsigned int numscales = 16;
+static unsigned int fsb;
+
+static const struct mV_pos *vrm_mV_table;
+static const unsigned char *mV_vrm_table;
+
+static unsigned int highest_speed, lowest_speed; /* kHz */
+static unsigned int minmult, maxmult;
+static int can_scale_voltage;
+static struct acpi_processor *pr;
+static struct acpi_processor_cx *cx;
+static u32 acpi_regs_addr;
+static u8 longhaul_flags;
+static unsigned int longhaul_index;
+
+/* Module parameters */
+static int scale_voltage;
+static int disable_acpi_c3;
+static int revid_errata;
+static int enable;
+
+/* Clock ratios multiplied by 10 */
+static int mults[32];
+static int eblcr[32];
+static int longhaul_version;
+static struct cpufreq_frequency_table *longhaul_table;
+
+static char speedbuffer[8];
+
+static char *print_speed(int speed)
+{
+ if (speed < 1000) {
+ snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
+ return speedbuffer;
+ }
+
+ if (speed%1000 == 0)
+ snprintf(speedbuffer, sizeof(speedbuffer),
+ "%dGHz", speed/1000);
+ else
+ snprintf(speedbuffer, sizeof(speedbuffer),
+ "%d.%dGHz", speed/1000, (speed%1000)/100);
+
+ return speedbuffer;
+}
+
+
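+/*
+ * Multipliers are stored times 10, so e.g. mult = 45 on a 100 MHz FSB
+ * gives (45 / 10) * 100 + 100 / 2 = 450 MHz, returned as 450000 kHz.
+ */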
+static unsigned int calc_speed(int mult)
+{
+ int khz;
+ khz = (mult/10)*fsb;
+ if (mult%10)
+ khz += fsb/2;
+ khz *= 1000;
+ return khz;
+}
+
+
+static int longhaul_get_cpu_mult(void)
+{
+ unsigned long invalue = 0, lo, hi;
+
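+	/*
+	 * Bits 25:22 of EBL_CR_POWERON encode the current ratio; bit 27 is
+	 * the extra ratio bit on Longhaul v2 and Powersaver parts.
+	 */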
+ rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
+ invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
+ if (longhaul_version == TYPE_LONGHAUL_V2 ||
+ longhaul_version == TYPE_POWERSAVER) {
+ if (lo & (1<<27))
+ invalue += 16;
+ }
+ return eblcr[invalue];
+}
+
+/* For processor with BCR2 MSR */
+
+static void do_longhaul1(unsigned int mults_index)
+{
+ union msr_bcr2 bcr2;
+
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Enable software clock multiplier */
+ bcr2.bits.ESOFTBF = 1;
+ bcr2.bits.CLOCKMUL = mults_index & 0xff;
+
+ /* Sync to timer tick */
+ safe_halt();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Invoke transition */
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+
+ /* Disable software clock multiplier */
+ local_irq_disable();
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ bcr2.bits.ESOFTBF = 0;
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
+
+/* For processor with Longhaul MSR */
+
+static void do_powersaver(int cx_address, unsigned int mults_index,
+ unsigned int dir)
+{
+ union msr_longhaul longhaul;
+ u32 t;
+
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Setup new frequency */
+ if (!revid_errata)
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ else
+ longhaul.bits.RevisionKey = 0;
+ longhaul.bits.SoftBusRatio = mults_index & 0xf;
+ longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
+ /* Setup new voltage */
+ if (can_scale_voltage)
+ longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
+ /* Sync to timer tick */
+ safe_halt();
+ /* Raise voltage if necessary */
+ if (can_scale_voltage && dir) {
+ longhaul.bits.EnableSoftVID = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Change voltage */
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3
+ * read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ }
+
+ /* Change frequency on next halt or sleep */
+ longhaul.bits.EnableSoftBusRatio = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ /* Disable bus ratio bit */
+ longhaul.bits.EnableSoftBusRatio = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+
+ /* Reduce voltage if necessary */
+ if (can_scale_voltage && !dir) {
+ longhaul.bits.EnableSoftVID = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Change voltage */
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3
+ * read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ }
+}
+
+/**
+ * longhaul_setstate()
+ * @policy: cpufreq policy to change
+ * @table_index: index of the new frequency (and multiplier) in longhaul_table.
+ *
+ * Sets a new clock ratio.
+ */
+
+static int longhaul_setstate(struct cpufreq_policy *policy,
+ unsigned int table_index)
+{
+ unsigned int mults_index;
+ int speed, mult;
+ struct cpufreq_freqs freqs;
+ unsigned long flags;
+ unsigned int pic1_mask, pic2_mask;
+ u16 bm_status = 0;
+ u32 bm_timeout = 1000;
+ unsigned int dir = 0;
+
+ mults_index = longhaul_table[table_index].driver_data;
+ /* Safety precautions */
+ mult = mults[mults_index & 0x1f];
+ if (mult == -1)
+ return -EINVAL;
+
+ speed = calc_speed(mult);
+ if ((speed > highest_speed) || (speed < lowest_speed))
+ return -EINVAL;
+
+ /* Voltage transition before frequency transition? */
+ if (can_scale_voltage && longhaul_index < table_index)
+ dir = 1;
+
+ freqs.old = calc_speed(longhaul_get_cpu_mult());
+ freqs.new = speed;
+
+ pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
+ fsb, mult/10, mult%10, print_speed(speed/1000));
+retry_loop:
+ preempt_disable();
+ local_irq_save(flags);
+
+ pic2_mask = inb(0xA1);
+ pic1_mask = inb(0x21); /* works on C3. save mask. */
+ outb(0xFF, 0xA1); /* Overkill */
+ outb(0xFE, 0x21); /* TMR0 only */
+
+ /* Wait while PCI bus is busy. */
+ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
+ || ((pr != NULL) && pr->flags.bm_control))) {
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ while (bm_status && bm_timeout) {
+ outw(1 << 4, acpi_regs_addr);
+ bm_timeout--;
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ }
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE) {
+ /* Disable AGP and PCI arbiters */
+ outb(3, 0x22);
+ } else if ((pr != NULL) && pr->flags.bm_control) {
+ /* Disable bus master arbitration */
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+ }
+ switch (longhaul_version) {
+
+ /*
+ * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
+ * Software controlled multipliers only.
+ */
+ case TYPE_LONGHAUL_V1:
+ do_longhaul1(mults_index);
+ break;
+
+ /*
+ * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
+ *
+ * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
+ * Nehemiah can do FSB scaling too, but this has never been proven
+ * to work in practice.
+ */
+ case TYPE_LONGHAUL_V2:
+ case TYPE_POWERSAVER:
+ if (longhaul_flags & USE_ACPI_C3) {
+ /* Don't allow wakeup */
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ do_powersaver(cx->address, mults_index, dir);
+ } else {
+ do_powersaver(0, mults_index, dir);
+ }
+ break;
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE) {
+ /* Enable arbiters */
+ outb(0, 0x22);
+ } else if ((pr != NULL) && pr->flags.bm_control) {
+ /* Enable bus master arbitration */
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+ }
+ outb(pic2_mask, 0xA1); /* restore mask */
+ outb(pic1_mask, 0x21);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ freqs.new = calc_speed(longhaul_get_cpu_mult());
+ /* Check if requested frequency is set. */
+ if (unlikely(freqs.new != speed)) {
+ pr_info("Failed to set requested frequency!\n");
+		/* Revision ID = 1, but the processor is expecting a revision
+		 * key equal to 0. Jumpers at the bottom of the processor will
+		 * change the multiplier and FSB, but will not change bits in
+		 * the Longhaul MSR nor enable voltage scaling. */
+ if (!revid_errata) {
+ pr_info("Enabling \"Ignore Revision ID\" option\n");
+ revid_errata = 1;
+ msleep(200);
+ goto retry_loop;
+ }
+		/* Why ACPI C3 sometimes doesn't work is a mystery to me,
+		 * but it does happen: the processor enters the ACPI C3 state,
+		 * yet the frequency doesn't change. I tried poking various
+		 * bits in the northbridge registers, but without success. */
+ if (longhaul_flags & USE_ACPI_C3) {
+ pr_info("Disabling ACPI C3 support\n");
+ longhaul_flags &= ~USE_ACPI_C3;
+ if (revid_errata) {
+ pr_info("Disabling \"Ignore Revision ID\" option\n");
+ revid_errata = 0;
+ }
+ msleep(200);
+ goto retry_loop;
+ }
+		/* This shouldn't happen. Longhaul ver. 2 was reported not to
+		 * work on processors without voltage scaling but with
+		 * RevID = 1. The RevID errata should make things right, but
+		 * switch to ver. 1 just to be 100% sure. */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ pr_info("Switching to Longhaul ver. 1\n");
+ longhaul_version = TYPE_LONGHAUL_V1;
+ msleep(200);
+ goto retry_loop;
+ }
+ }
+
+ if (!bm_timeout) {
+ pr_info("Warning: Timeout while waiting for idle PCI bus\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * Centaur decided to make life a little more tricky.
+ * Only Longhaul v1 is allowed to read EBLCR BSEL[0:1].
+ * Samuel2 and above have to guess what the FSB is.
+ * We do this by assuming we booted at the maximum multiplier and
+ * interpolating between that value multiplied by the possible FSBs
+ * and the CPU speed (cpu_khz) calculated at boot time. Really ugly,
+ * but there is no other way to do this.
+ */
+
+#define ROUNDING 0xf
+
+static int guess_fsb(int mult)
+{
+ int speed = cpu_khz / 1000;
+ int i;
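+	/* Candidate FSB values in MHz times 10 (66.6, 100, 133.3 and 200 MHz). */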
+ int speeds[] = { 666, 1000, 1333, 2000 };
+ int f_max, f_min;
+
+ for (i = 0; i < 4; i++) {
+ f_max = ((speeds[i] * mult) + 50) / 100;
+ f_max += (ROUNDING / 2);
+ f_min = f_max - ROUNDING;
+ if ((speed <= f_max) && (speed >= f_min))
+ return speeds[i] / 10;
+ }
+ return 0;
+}
+
+
+static int longhaul_get_ranges(void)
+{
+ unsigned int i, j, k = 0;
+ unsigned int ratio;
+ int mult;
+
+ /* Get current frequency */
+ mult = longhaul_get_cpu_mult();
+ if (mult == -1) {
+ pr_info("Invalid (reserved) multiplier!\n");
+ return -EINVAL;
+ }
+ fsb = guess_fsb(mult);
+ if (fsb == 0) {
+ pr_info("Invalid (reserved) FSB!\n");
+ return -EINVAL;
+ }
+	/* Get the max multiplier - as we always did.
+	 * The Longhaul MSR is useful only when voltage scaling is enabled;
+	 * the C3 boots at the maximum multiplier anyway. */
+ maxmult = mult;
+ /* Get min multiplier */
+ switch (cpu_model) {
+ case CPU_NEHEMIAH:
+ minmult = 50;
+ break;
+ case CPU_NEHEMIAH_C:
+ minmult = 40;
+ break;
+ default:
+ minmult = 30;
+ break;
+ }
+
+ pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n",
+ minmult/10, minmult%10, maxmult/10, maxmult%10);
+
+ highest_speed = calc_speed(maxmult);
+ lowest_speed = calc_speed(minmult);
+ pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
+ print_speed(lowest_speed/1000),
+ print_speed(highest_speed/1000));
+
+ if (lowest_speed == highest_speed) {
+ pr_info("highestspeed == lowest, aborting\n");
+ return -EINVAL;
+ }
+ if (lowest_speed > highest_speed) {
+ pr_info("nonsense! lowest (%d > %d) !\n",
+ lowest_speed, highest_speed);
+ return -EINVAL;
+ }
+
+ longhaul_table = kcalloc(numscales + 1, sizeof(*longhaul_table),
+ GFP_KERNEL);
+ if (!longhaul_table)
+ return -ENOMEM;
+
+ for (j = 0; j < numscales; j++) {
+ ratio = mults[j];
+ if (ratio == -1)
+ continue;
+ if (ratio > maxmult || ratio < minmult)
+ continue;
+ longhaul_table[k].frequency = calc_speed(ratio);
+ longhaul_table[k].driver_data = j;
+ k++;
+ }
+ if (k <= 1) {
+ kfree(longhaul_table);
+ return -ENODEV;
+ }
+ /* Sort */
+ for (j = 0; j < k - 1; j++) {
+ unsigned int min_f, min_i;
+ min_f = longhaul_table[j].frequency;
+ min_i = j;
+ for (i = j + 1; i < k; i++) {
+ if (longhaul_table[i].frequency < min_f) {
+ min_f = longhaul_table[i].frequency;
+ min_i = i;
+ }
+ }
+ if (min_i != j) {
+ swap(longhaul_table[j].frequency,
+ longhaul_table[min_i].frequency);
+ swap(longhaul_table[j].driver_data,
+ longhaul_table[min_i].driver_data);
+ }
+ }
+
+ longhaul_table[k].frequency = CPUFREQ_TABLE_END;
+
+ /* Find index we are running on */
+ for (j = 0; j < k; j++) {
+ if (mults[longhaul_table[j].driver_data & 0x1f] == mult) {
+ longhaul_index = j;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+static void longhaul_setup_voltagescaling(void)
+{
+ struct cpufreq_frequency_table *freq_pos;
+ union msr_longhaul longhaul;
+ struct mV_pos minvid, maxvid, vid;
+ unsigned int j, speed, pos, kHz_step, numvscales;
+ int min_vid_speed;
+
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ if (!(longhaul.bits.RevisionID & 1)) {
+ pr_info("Voltage scaling not supported by CPU\n");
+ return;
+ }
+
+ if (!longhaul.bits.VRMRev) {
+ pr_info("VRM 8.5\n");
+ vrm_mV_table = &vrm85_mV[0];
+ mV_vrm_table = &mV_vrm85[0];
+ } else {
+ pr_info("Mobile VRM\n");
+ if (cpu_model < CPU_NEHEMIAH)
+ return;
+ vrm_mV_table = &mobilevrm_mV[0];
+ mV_vrm_table = &mV_mobilevrm[0];
+ }
+
+ minvid = vrm_mV_table[longhaul.bits.MinimumVID];
+ maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
+
+ if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
+ pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
+ minvid.mV/1000, minvid.mV%1000,
+ maxvid.mV/1000, maxvid.mV%1000);
+ return;
+ }
+
+ if (minvid.mV == maxvid.mV) {
+ pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
+ maxvid.mV/1000, maxvid.mV%1000);
+ return;
+ }
+
+	/* How many voltage steps */
+ numvscales = maxvid.pos - minvid.pos + 1;
+ pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
+ maxvid.mV/1000, maxvid.mV%1000,
+ minvid.mV/1000, minvid.mV%1000,
+ numvscales);
+
+ /* Calculate max frequency at min voltage */
+ j = longhaul.bits.MinMHzBR;
+ if (longhaul.bits.MinMHzBR4)
+ j += 16;
+ min_vid_speed = eblcr[j];
+ if (min_vid_speed == -1)
+ return;
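+	/*
+	 * min_vid_speed holds the ratio times 10; multiplying by the FSB in
+	 * MHz times 100 (133 MHz -> 13333 and so on) converts it to kHz.
+	 */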
+ switch (longhaul.bits.MinMHzFSB) {
+ case 0:
+ min_vid_speed *= 13333;
+ break;
+ case 1:
+ min_vid_speed *= 10000;
+ break;
+ case 3:
+ min_vid_speed *= 6666;
+ break;
+ default:
+ return;
+ }
+ if (min_vid_speed >= highest_speed)
+ return;
+ /* Calculate kHz for one voltage step */
+ kHz_step = (highest_speed - min_vid_speed) / numvscales;
+
+ cpufreq_for_each_entry_idx(freq_pos, longhaul_table, j) {
+ speed = freq_pos->frequency;
+ if (speed > min_vid_speed)
+ pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
+ else
+ pos = minvid.pos;
+ freq_pos->driver_data |= mV_vrm_table[pos] << 8;
+ vid = vrm_mV_table[mV_vrm_table[pos]];
+ pr_info("f: %d kHz, index: %d, vid: %d mV\n",
+ speed, j, vid.mV);
+ }
+
+ can_scale_voltage = 1;
+ pr_info("Voltage scaling enabled\n");
+}
+
+
+static int longhaul_target(struct cpufreq_policy *policy,
+ unsigned int table_index)
+{
+ unsigned int i;
+ unsigned int dir = 0;
+ u8 vid, current_vid;
+ int retval = 0;
+
+ if (!can_scale_voltage)
+ retval = longhaul_setstate(policy, table_index);
+ else {
+		/* On the test system, voltage transitions exceeding a single
+		 * step up or down were turning the motherboard off. Both
+		 * "ondemand" and "userspace" are unsafe. The C7 does this
+		 * stepping in hardware; the C3 is older, so we need to do it
+		 * in software. */
+ i = longhaul_index;
+ current_vid = (longhaul_table[longhaul_index].driver_data >> 8);
+ current_vid &= 0x1f;
+ if (table_index > longhaul_index)
+ dir = 1;
+ while (i != table_index) {
+ vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
+ if (vid != current_vid) {
+ retval = longhaul_setstate(policy, i);
+ current_vid = vid;
+ msleep(200);
+ }
+ if (dir)
+ i++;
+ else
+ i--;
+ }
+ retval = longhaul_setstate(policy, table_index);
+ }
+
+ longhaul_index = table_index;
+ return retval;
+}
+
+
+static unsigned int longhaul_get(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+ return calc_speed(longhaul_get_cpu_mult());
+}
+
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle);
+
+ if (!d)
+ return 0;
+
+ *return_value = acpi_driver_data(d);
+ return 1;
+}
+
+/* VIA doesn't support the PM2 register, but has something similar */
+static int enable_arbiter_disable(void)
+{
+ struct pci_dev *dev;
+ int status = 1;
+ int reg;
+ u8 pci_cmd;
+
+ /* Find PLE133 host bridge */
+ reg = 0x78;
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
+ NULL);
+ /* Find PM133/VT8605 host bridge */
+ if (dev == NULL)
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8605_0, NULL);
+ /* Find CLE266 host bridge */
+ if (dev == NULL) {
+ reg = 0x76;
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_862X_0, NULL);
+ /* Find CN400 V-Link host bridge */
+ if (dev == NULL)
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
+ }
+ if (dev != NULL) {
+ /* Enable access to port 0x22 */
+ pci_read_config_byte(dev, reg, &pci_cmd);
+ if (!(pci_cmd & 1<<7)) {
+ pci_cmd |= 1<<7;
+ pci_write_config_byte(dev, reg, pci_cmd);
+ pci_read_config_byte(dev, reg, &pci_cmd);
+ if (!(pci_cmd & 1<<7)) {
+ pr_err("Can't enable access to port 0x22\n");
+ status = 0;
+ }
+ }
+ pci_dev_put(dev);
+ return status;
+ }
+ return 0;
+}
+
+static int longhaul_setup_southbridge(void)
+{
+ struct pci_dev *dev;
+ u8 pci_cmd;
+
+ /* Find VT8235 southbridge */
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+ if (dev == NULL)
+ /* Find VT8237 southbridge */
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8237, NULL);
+ if (dev != NULL) {
+ /* Set transition time to max */
+ pci_read_config_byte(dev, 0xec, &pci_cmd);
+ pci_cmd &= ~(1 << 2);
+ pci_write_config_byte(dev, 0xec, pci_cmd);
+ pci_read_config_byte(dev, 0xe4, &pci_cmd);
+ pci_cmd &= ~(1 << 7);
+ pci_write_config_byte(dev, 0xe4, pci_cmd);
+ pci_read_config_byte(dev, 0xe5, &pci_cmd);
+ pci_cmd |= 1 << 7;
+ pci_write_config_byte(dev, 0xe5, pci_cmd);
+ /* Get address of ACPI registers block*/
+ pci_read_config_byte(dev, 0x81, &pci_cmd);
+ if (pci_cmd & 1 << 7) {
+ pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
+ acpi_regs_addr &= 0xff00;
+ pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
+ }
+
+ pci_dev_put(dev);
+ return 1;
+ }
+ return 0;
+}
+
+static int longhaul_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ char *cpuname = NULL;
+ int ret;
+ u32 lo, hi;
+
+ /* Check what we have on this motherboard */
+ switch (c->x86_model) {
+ case 6:
+ cpu_model = CPU_SAMUEL;
+ cpuname = "C3 'Samuel' [C5A]";
+ longhaul_version = TYPE_LONGHAUL_V1;
+ memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
+ memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
+ break;
+
+ case 7:
+ switch (c->x86_stepping) {
+ case 0:
+ longhaul_version = TYPE_LONGHAUL_V1;
+ cpu_model = CPU_SAMUEL2;
+ cpuname = "C3 'Samuel 2' [C5B]";
+			/* Note, this is not a typo: early Samuel2s had
+			 * Samuel1 ratios. */
+ memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
+ memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
+ break;
+ case 1 ... 15:
+ longhaul_version = TYPE_LONGHAUL_V2;
+ if (c->x86_stepping < 8) {
+ cpu_model = CPU_SAMUEL2;
+ cpuname = "C3 'Samuel 2' [C5B]";
+ } else {
+ cpu_model = CPU_EZRA;
+ cpuname = "C3 'Ezra' [C5C]";
+ }
+ memcpy(mults, ezra_mults, sizeof(ezra_mults));
+ memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
+ break;
+ }
+ break;
+
+ case 8:
+ cpu_model = CPU_EZRA_T;
+ cpuname = "C3 'Ezra-T' [C5M]";
+ longhaul_version = TYPE_POWERSAVER;
+ numscales = 32;
+ memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
+ memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
+ break;
+
+ case 9:
+ longhaul_version = TYPE_POWERSAVER;
+ numscales = 32;
+ memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
+ memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
+ switch (c->x86_stepping) {
+ case 0 ... 1:
+ cpu_model = CPU_NEHEMIAH;
+ cpuname = "C3 'Nehemiah A' [C5XLOE]";
+ break;
+ case 2 ... 4:
+ cpu_model = CPU_NEHEMIAH;
+ cpuname = "C3 'Nehemiah B' [C5XLOH]";
+ break;
+ case 5 ... 15:
+ cpu_model = CPU_NEHEMIAH_C;
+ cpuname = "C3 'Nehemiah C' [C5P]";
+ break;
+ }
+ break;
+
+ default:
+ cpuname = "Unknown";
+ break;
+ }
+ /* Check Longhaul ver. 2 */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ rdmsr(MSR_VIA_LONGHAUL, lo, hi);
+ if (lo == 0 && hi == 0)
+ /* Looks like MSR isn't present */
+ longhaul_version = TYPE_LONGHAUL_V1;
+ }
+
+ pr_info("VIA %s CPU detected. ", cpuname);
+ switch (longhaul_version) {
+ case TYPE_LONGHAUL_V1:
+ case TYPE_LONGHAUL_V2:
+ pr_cont("Longhaul v%d supported\n", longhaul_version);
+ break;
+ case TYPE_POWERSAVER:
+ pr_cont("Powersaver supported\n");
+ break;
+ }
+
+ /* Doesn't hurt */
+ longhaul_setup_southbridge();
+
+ /* Find ACPI data for processor */
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
+ NULL, (void *)&pr);
+
+ /* Check ACPI support for C3 state */
+ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
+ cx = &pr->power.states[ACPI_STATE_C3];
+ if (cx->address > 0 && cx->latency <= 1000)
+ longhaul_flags |= USE_ACPI_C3;
+ }
+ /* Disable if it isn't working */
+ if (disable_acpi_c3)
+ longhaul_flags &= ~USE_ACPI_C3;
+ /* Check if northbridge is friendly */
+ if (enable_arbiter_disable())
+ longhaul_flags |= USE_NORTHBRIDGE;
+
+ /* Check ACPI support for bus master arbiter disable */
+ if (!(longhaul_flags & USE_ACPI_C3
+ || longhaul_flags & USE_NORTHBRIDGE)
+ && ((pr == NULL) || !(pr->flags.bm_control))) {
+ pr_err("No ACPI support: Unsupported northbridge\n");
+ return -ENODEV;
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE)
+ pr_info("Using northbridge support\n");
+ if (longhaul_flags & USE_ACPI_C3)
+ pr_info("Using ACPI support\n");
+
+ ret = longhaul_get_ranges();
+ if (ret != 0)
+ return ret;
+
+ if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
+ longhaul_setup_voltagescaling();
+
+ policy->transition_delay_us = 200000; /* usec */
+ policy->freq_table = longhaul_table;
+
+ return 0;
+}
+
+static struct cpufreq_driver longhaul_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = longhaul_target,
+ .get = longhaul_get,
+ .init = longhaul_cpu_init,
+ .name = "longhaul",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id longhaul_id[] = {
+ X86_MATCH_VENDOR_FAM(CENTAUR, 6, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, longhaul_id);
+
+static int __init longhaul_init(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!x86_match_cpu(longhaul_id))
+ return -ENODEV;
+
+ if (!enable) {
+ pr_err("Option \"enable\" not set - Aborting\n");
+ return -ENODEV;
+ }
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ pr_err("More than 1 CPU detected, longhaul disabled\n");
+ return -ENODEV;
+ }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
+ return -ENODEV;
+ }
+#endif
+ switch (c->x86_model) {
+ case 6 ... 9:
+ return cpufreq_register_driver(&longhaul_driver);
+ case 10:
+ pr_err("Use acpi-cpufreq driver for VIA C7\n");
+ }
+
+ return -ENODEV;
+}
+
+
+static void __exit longhaul_exit(void)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ int i;
+
+ for (i = 0; i < numscales; i++) {
+ if (mults[i] == maxmult) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = longhaul_table[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ longhaul_setstate(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
+ }
+
+ cpufreq_cpu_put(policy);
+ cpufreq_unregister_driver(&longhaul_driver);
+ kfree(longhaul_table);
+}
+
+/* Even if the BIOS exports an ACPI C3 state, and it is used
+ * successfully when the CPU is idle, in some cases this state
+ * doesn't trigger the frequency transition. */
+module_param(disable_acpi_c3, int, 0644);
+MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
+/* Change the CPU voltage along with the frequency. Very useful for
+ * saving power, but most VIA C3 processors don't support it. */
+module_param(scale_voltage, int, 0644);
+MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+/* Force the revision key to 0 for processors which don't
+ * support voltage scaling but present themselves as if
+ * they did. */
+module_param(revid_errata, int, 0644);
+MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* The driver is disabled by default to prevent freezing
+ * incompatible systems. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
+MODULE_LICENSE("GPL");
+
+late_initcall(longhaul_init);
+module_exit(longhaul_exit);
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
new file mode 100644
index 000000000..89c4cc297
--- /dev/null
+++ b/drivers/cpufreq/longhaul.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * longhaul.h
+ * (C) 2003 Dave Jones.
+ *
+ * VIA-specific information
+ */
+
+union msr_bcr2 {
+ struct {
+ unsigned Reseved:19, // 18:0
+ ESOFTBF:1, // 19
+ Reserved2:3, // 22:20
+ CLOCKMUL:4, // 26:23
+ Reserved3:5; // 31:27
+ } bits;
+ unsigned long val;
+};
+
+union msr_longhaul {
+ struct {
+ unsigned RevisionID:4, // 3:0
+ RevisionKey:4, // 7:4
+ EnableSoftBusRatio:1, // 8
+ EnableSoftVID:1, // 9
+ EnableSoftBSEL:1, // 10
+		Reserved:3,	// 13:11
+ SoftBusRatio4:1, // 14
+ VRMRev:1, // 15
+ SoftBusRatio:4, // 19:16
+ SoftVID:5, // 24:20
+ Reserved2:3, // 27:25
+ SoftBSEL:2, // 29:28
+ Reserved3:2, // 31:30
+ MaxMHzBR:4, // 35:32
+ MaximumVID:5, // 40:36
+ MaxMHzFSB:2, // 42:41
+ MaxMHzBR4:1, // 43
+ Reserved4:4, // 47:44
+ MinMHzBR:4, // 51:48
+ MinimumVID:5, // 56:52
+ MinMHzFSB:2, // 58:57
+ MinMHzBR4:1, // 59
+ Reserved5:4; // 63:60
+ } bits;
+ unsigned long long val;
+};
+
+/*
+ * Clock ratio tables. Div/Mod by 10 to get ratio.
+ * The eblcr values specify the ratio read from the CPU.
+ * The mults values specify what to write to the CPU.
+ */
+
+/*
+ * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
+ */
+static const int samuel1_mults[16] = {
+ -1, /* 0000 -> RESERVED */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ -1, /* 0011 -> RESERVED */
+ -1, /* 0100 -> RESERVED */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ -1, /* 1110 -> RESERVED */
+ -1, /* 1111 -> RESERVED */
+};
+
+static const int samuel1_eblcr[16] = {
+ 50, /* 0000 -> RESERVED */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ -1, /* 0011 -> RESERVED */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ -1, /* 0111 -> RESERVED */
+ -1, /* 1000 -> RESERVED */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ -1, /* 1100 -> RESERVED */
+ 75, /* 1101 -> 7.5x */
+ -1, /* 1110 -> RESERVED */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 Samuel2 Stepping 1->15
+ */
+static const int samuel2_eblcr[16] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 110, /* 0111 -> 11.0x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 130, /* 1110 -> 13.0x */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 Ezra
+ */
+static const int ezra_mults[16] = {
+ 100, /* 0000 -> 10.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+};
+
+static const int ezra_eblcr[16] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 (Ezra-T) [C5M].
+ */
+static const int ezrat_mults[32] = {
+ 100, /* 0000 -> 10.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+
+ -1, /* 0000 -> RESERVED (10.0x) */
+ 110, /* 0001 -> 11.0x */
+ -1, /* 0010 -> 12.0x */
+ -1, /* 0011 -> RESERVED (9.0x)*/
+ 105, /* 0100 -> 10.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 135, /* 0111 -> 13.5x */
+ 140, /* 1000 -> 14.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 130, /* 1011 -> 13.0x */
+ 145, /* 1100 -> 14.5x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ -1, /* 1111 -> RESERVED (12.0x) */
+};
+
+static const int ezrat_eblcr[32] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+
+ -1, /* 0000 -> RESERVED (9.0x) */
+ 110, /* 0001 -> 11.0x */
+ 120, /* 0010 -> 12.0x */
+ -1, /* 0011 -> RESERVED (10.0x)*/
+ 135, /* 0100 -> 13.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 105, /* 0111 -> 10.5x */
+ 130, /* 1000 -> 13.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 140, /* 1011 -> 14.0x */
+ -1, /* 1100 -> RESERVED (12.0x) */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ 145, /* 1111 -> 14.5x */
+};
+
+/*
+ * VIA C3 Nehemiah
+ */
+
+static const int nehemiah_mults[32] = {
+ 100, /* 0000 -> 10.0x */
+ -1, /* 0001 -> 16.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ -1, /* 0101 -> RESERVED */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+ -1, /* 0000 -> 10.0x */
+ 110, /* 0001 -> 11.0x */
+ -1, /* 0010 -> 12.0x */
+ -1, /* 0011 -> 9.0x */
+ 105, /* 0100 -> 10.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 135, /* 0111 -> 13.5x */
+ 140, /* 1000 -> 14.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 130, /* 1011 -> 13.0x */
+ 145, /* 1100 -> 14.5x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ -1, /* 1111 -> 12.0x */
+};
+
+static const int nehemiah_eblcr[32] = {
+ 50, /* 0000 -> 5.0x */
+ 160, /* 0001 -> 16.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ -1, /* 0101 -> RESERVED */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+ 90, /* 0000 -> 9.0x */
+ 110, /* 0001 -> 11.0x */
+ 120, /* 0010 -> 12.0x */
+ 100, /* 0011 -> 10.0x */
+ 135, /* 0100 -> 13.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 105, /* 0111 -> 10.5x */
+ 130, /* 1000 -> 13.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 140, /* 1011 -> 14.0x */
+ 120, /* 1100 -> 12.0x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ 145 /* 1111 -> 14.5x */
+};
+
+/*
+ * Voltage scales. Div/Mod by 1000 to get actual voltage.
+ * Which scale to use depends on the VRM type in use.
+ */
+
+struct mV_pos {
+ unsigned short mV;
+ unsigned short pos;
+};
+
+static const struct mV_pos vrm85_mV[32] = {
+ {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
+ {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
+ {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
+ {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10},
+ {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3},
+ {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27},
+ {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19},
+ {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
+};
+
+static const unsigned char mV_vrm85[32] = {
+ 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
+ 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
+ 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
+ 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
+};
+
+static const struct mV_pos mobilevrm_mV[32] = {
+ {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
+ {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
+ {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
+ {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16},
+ {975, 15}, {950, 14}, {925, 13}, {900, 12},
+ {875, 11}, {850, 10}, {825, 9}, {800, 8},
+ {775, 7}, {750, 6}, {725, 5}, {700, 4},
+ {675, 3}, {650, 2}, {625, 1}, {600, 0}
+};
+
+static const unsigned char mV_mobilevrm[32] = {
+ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
+ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
+};
+
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
new file mode 100644
index 000000000..1caaec7c2
--- /dev/null
+++ b/drivers/cpufreq/longrun.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/timex.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+static struct cpufreq_driver longrun_driver;
+
+/**
+ * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
+ * values into per cent values. In TMTA microcode, the following is valid:
+ * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+ */
+static unsigned int longrun_low_freq, longrun_high_freq;
+
+
+/**
+ * longrun_get_policy - get the current LongRun policy
+ * @policy: struct cpufreq_policy where current policy is written into
+ *
+ * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
+ * and MSR_TMTA_LONGRUN_CTRL
+ */
+static void longrun_get_policy(struct cpufreq_policy *policy)
+{
+ u32 msr_lo, msr_hi;
+
+ rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+ pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
+ if (msr_lo & 0x01)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
+ msr_lo &= 0x0000007F;
+ msr_hi &= 0x0000007F;
+
+ if (longrun_high_freq <= longrun_low_freq) {
+ /* Assume degenerate Longrun table */
+ policy->min = policy->max = longrun_high_freq;
+ } else {
+ policy->min = longrun_low_freq + msr_lo *
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ policy->max = longrun_low_freq + msr_hi *
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ }
+ policy->cpu = 0;
+}
+
+
+/**
+ * longrun_set_policy - sets a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Sets a new CPUFreq policy on LongRun-capable processors. This function
+ * has to be called with cpufreq_driver locked.
+ */
+static int longrun_set_policy(struct cpufreq_policy *policy)
+{
+ u32 msr_lo, msr_hi;
+ u32 pctg_lo, pctg_hi;
+
+ if (!policy)
+ return -EINVAL;
+
+ if (longrun_high_freq <= longrun_low_freq) {
+ /* Assume degenerate Longrun table */
+ pctg_lo = pctg_hi = 100;
+ } else {
+ pctg_lo = (policy->min - longrun_low_freq) /
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ pctg_hi = (policy->max - longrun_low_freq) /
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ }
+
+ if (pctg_hi > 100)
+ pctg_hi = 100;
+ if (pctg_lo > pctg_hi)
+ pctg_lo = pctg_hi;
+
+ /* performance or economy mode */
+ rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+ msr_lo &= 0xFFFFFFFE;
+ switch (policy->policy) {
+ case CPUFREQ_POLICY_PERFORMANCE:
+ msr_lo |= 0x00000001;
+ break;
+ case CPUFREQ_POLICY_POWERSAVE:
+ break;
+ }
+ wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+
+ /* lower and upper boundary */
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ msr_lo &= 0xFFFFFF80;
+ msr_hi &= 0xFFFFFF80;
+ msr_lo |= pctg_lo;
+ msr_hi |= pctg_hi;
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+ return 0;
+}
+
+
+/**
+ * longrun_verify_policy - verifies a new CPUFreq policy
+ * @policy: the policy to verify
+ *
+ * Validates a new CPUFreq policy. This function has to be called with
+ * cpufreq_driver locked.
+ */
+static int longrun_verify_policy(struct cpufreq_policy_data *policy)
+{
+ if (!policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+ cpufreq_verify_within_cpu_limits(policy);
+
+ return 0;
+}
+
+static unsigned int longrun_get(unsigned int cpu)
+{
+ u32 eax, ebx, ecx, edx;
+
+ if (cpu)
+ return 0;
+
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+ pr_debug("cpuid eax is %u\n", eax);
+
+ return eax * 1000;
+}
+
+/**
+ * longrun_determine_freqs - determines the lowest and highest possible core frequency
+ * @low_freq: an int to put the lowest frequency into
+ * @high_freq: an int to put the highest frequency into
+ *
+ * Determines the lowest and highest possible core frequencies on this CPU.
+ * This is necessary to calculate the performance percentage according to
+ * TMTA rules:
+ * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
+ */
+static int longrun_determine_freqs(unsigned int *low_freq,
+ unsigned int *high_freq)
+{
+ u32 msr_lo, msr_hi;
+ u32 save_lo, save_hi;
+ u32 eax, ebx, ecx, edx;
+ u32 try_hi;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!low_freq || !high_freq)
+ return -EINVAL;
+
+ if (cpu_has(c, X86_FEATURE_LRTI)) {
+ /* if the LongRun Table Interface is present, the
+ * detection is a bit easier:
+ * For minimum frequency, read out the maximum
+ * level (msr_hi), write that into "currently
+ * selected level", and read out the frequency.
+ * For maximum frequency, read out level zero.
+ */
+ /* minimum */
+ rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
+ wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
+ rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+ *low_freq = msr_lo * 1000; /* to kHz */
+
+ /* maximum */
+ wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
+ rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+ *high_freq = msr_lo * 1000; /* to kHz */
+
+ pr_debug("longrun table interface told %u - %u kHz\n",
+ *low_freq, *high_freq);
+
+ if (*low_freq > *high_freq)
+ *low_freq = *high_freq;
+ return 0;
+ }
+
+ /* set the upper border to the value determined during TSC init */
+ *high_freq = (cpu_khz / 1000);
+ *high_freq = *high_freq * 1000;
+ pr_debug("high frequency is %u kHz\n", *high_freq);
+
+ /* get current borders */
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ save_lo = msr_lo & 0x0000007F;
+ save_hi = msr_hi & 0x0000007F;
+
+ /* if current perf_pctg is larger than 90%, we need to decrease the
+ * upper limit to make the calculation more accurate.
+ */
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+ /* try decreasing in 10% steps; some processors react only
+ * to certain boundary values */
+ for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
+ /* set to 0 to try_hi perf_pctg */
+ msr_lo &= 0xFFFFFF80;
+ msr_hi &= 0xFFFFFF80;
+ msr_hi |= try_hi;
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+ /* read out current core MHz and current perf_pctg */
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+
+ /* restore values */
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
+ }
+ pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);
+
+ /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+ * equals
+ * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
+ *
+ * high_freq * perf_pctg is stored temporarily into "ebx".
+ */
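+ /*
+ * Worked example (illustrative numbers): with cpu_khz/1000 = 1000 MHz
+ * and perf_pctg (ecx) = 60, ebx = 600 MHz; if the reported core clock
+ * (eax) is 840 MHz, low_freq = (840 - 600) * 100 / 40 = 600 MHz.
+ */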
+ ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
+
+ if ((ecx > 95) || (ecx == 0) || (eax < ebx))
+ return -EIO;
+
+ edx = ((eax - ebx) * 100) / (100 - ecx);
+ *low_freq = edx * 1000; /* back to kHz */
+
+ pr_debug("low frequency is %u kHz\n", *low_freq);
+
+ if (*low_freq > *high_freq)
+ *low_freq = *high_freq;
+
+ return 0;
+}
+
+
+static int longrun_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* detect low and high frequency */
+ result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
+ if (result)
+ return result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = longrun_low_freq;
+ policy->cpuinfo.max_freq = longrun_high_freq;
+ longrun_get_policy(policy);
+
+ return 0;
+}
+
+
+static struct cpufreq_driver longrun_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = longrun_verify_policy,
+ .setpolicy = longrun_set_policy,
+ .get = longrun_get,
+ .init = longrun_cpu_init,
+ .name = "longrun",
+};
+
+static const struct x86_cpu_id longrun_ids[] = {
+ X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, longrun_ids);
+
+/**
+ * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
+ *
+ * Initializes the LongRun support.
+ */
+static int __init longrun_init(void)
+{
+ if (!x86_match_cpu(longrun_ids))
+ return -ENODEV;
+ return cpufreq_register_driver(&longrun_driver);
+}
+
+
+/**
+ * longrun_exit - unregisters LongRun support
+ */
+static void __exit longrun_exit(void)
+{
+ cpufreq_unregister_driver(&longrun_driver);
+}
+
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
+ "Efficeon processors.");
+MODULE_LICENSE("GPL");
+
+module_init(longrun_init);
+module_exit(longrun_exit);
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
new file mode 100644
index 000000000..fb72d709d
--- /dev/null
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -0,0 +1,222 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <cpufreq.h>
+#include <loongson1.h>
+
+struct ls1x_cpufreq {
+ struct device *dev;
+ struct clk *clk; /* CPU clk */
+ struct clk *mux_clk; /* MUX of CPU clk */
+ struct clk *pll_clk; /* PLL clk */
+ struct clk *osc_clk; /* OSC clk */
+ unsigned int max_freq;
+ unsigned int min_freq;
+};
+
+static struct ls1x_cpufreq *cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+ .notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = policy->freq_table[index].frequency;
+
+ /*
+ * The procedure of reconfiguring CPU clk is as below.
+ *
+ * - Reparent CPU clk to OSC clk
+ * - Reset CPU clock (very important)
+ * - Reconfigure CPU DIV
+ * - Reparent CPU clk back to CPU DIV clk
+ */
+
+ clk_set_parent(policy->clk, cpufreq->osc_clk);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+ LS1X_CLK_PLL_DIV);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+ LS1X_CLK_PLL_DIV);
+ clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
+ clk_set_parent(policy->clk, cpufreq->mux_clk);
+ dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+ struct cpufreq_frequency_table *freq_tbl;
+ unsigned int pll_freq, freq;
+ int steps, i;
+
+ pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
+
+ steps = 1 << DIV_CPU_WIDTH;
+ freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
+ if (!freq_tbl)
+ return -ENOMEM;
+
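+ /*
+ * Illustrative values: with a 504 MHz PLL the candidate entries are
+ * 504000, 252000, 168000, ... kHz (pll_freq divided by i + 1); entries
+ * outside [min_freq, max_freq] are marked invalid below.
+ */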
+ for (i = 0; i < (steps - 1); i++) {
+ freq = pll_freq / (i + 1);
+ if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
+ freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_tbl[i].frequency = freq;
+ dev_dbg(cpu_dev,
+ "cpufreq table: index %d: frequency %d\n", i,
+ freq_tbl[i].frequency);
+ }
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ policy->clk = cpufreq->clk;
+ cpufreq_generic_init(policy, freq_tbl, 0);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+ return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+ .name = "cpufreq-ls1x",
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ls1x_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = ls1x_cpufreq_init,
+ .exit = ls1x_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+ struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
+ struct clk *clk;
+ int ret;
+
+ if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -EINVAL;
+ }
+
+ cpufreq =
+ devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
+ if (!cpufreq)
+ return -ENOMEM;
+
+ cpufreq->dev = &pdev->dev;
+
+ clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get %s clock\n",
+ pdata->clk_name);
+ return PTR_ERR(clk);
+ }
+ cpufreq->clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+ __clk_get_name(cpufreq->clk));
+ return PTR_ERR(clk);
+ }
+ cpufreq->mux_clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+ __clk_get_name(cpufreq->mux_clk));
+ return PTR_ERR(clk);
+ }
+ cpufreq->pll_clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "unable to get %s clock\n",
+ pdata->osc_clk_name);
+ return PTR_ERR(clk);
+ }
+ cpufreq->osc_clk = clk;
+
+ cpufreq->max_freq = pdata->max_freq;
+ cpufreq->min_freq = pdata->min_freq;
+
+ ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register CPUFreq driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register CPUFreq notifier: %d\n",ret);
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+ }
+
+ return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+ .probe = ls1x_cpufreq_probe,
+ .remove = ls1x_cpufreq_remove,
+ .driver = {
+ .name = "ls1x-cpufreq",
+ },
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_ALIAS("platform:ls1x-cpufreq");
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 000000000..afc59b292
--- /dev/null
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,184 @@
+/*
+ * Cpufreq driver for the loongson-2 processors
+ *
+ * The 2E revision of the Loongson processor does not support this feature.
+ *
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <asm/idle.h>
+
+#include <asm/mach-loongson2ef/loongson.h>
+
+static uint nowait;
+
+static void (*saved_cpu_wait) (void);
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
+
+static struct notifier_block loongson2_cpufreq_notifier_block = {
+ .notifier_call = loongson2_cpu_freq_notifier
+};
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return 0;
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int freq;
+
+ freq =
+ ((cpu_clock_freq / 1000) *
+ loongson2_clockmod_table[index].driver_data) / 8;
+
+ /* setting the cpu frequency */
+ loongson2_cpu_set_rate(freq);
+
+ return 0;
+}
+
+static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i;
+ unsigned long rate;
+ int ret;
+
+ rate = cpu_clock_freq / 1000;
+ if (!rate)
+ return -EINVAL;
+
+ /* clock table init */
+ for (i = 2;
+ (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
+ i++)
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+
+ ret = loongson2_cpu_set_rate(rate);
+ if (ret)
+ return ret;
+
+ cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
+ return 0;
+}
+
+static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver loongson2_cpufreq_driver = {
+ .name = "loongson2",
+ .init = loongson2_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = loongson2_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .exit = loongson2_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct platform_device_id platform_device_ids[] = {
+ {
+ .name = "loongson2_cpufreq",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "loongson2_cpufreq",
+ },
+ .id_table = platform_device_ids,
+};
+
+/*
+ * This is the simple version of the Loongson-2 wait. Maybe we need to do
+ * this in an interrupt-disabled context.
+ */
+
+static DEFINE_SPINLOCK(loongson2_wait_lock);
+
+static void loongson2_cpu_wait(void)
+{
+ unsigned long flags;
+ u32 cpu_freq;
+
+ spin_lock_irqsave(&loongson2_wait_lock, flags);
+ cpu_freq = readl(LOONGSON_CHIPCFG);
+ /* Put CPU into wait mode */
+ writel(readl(LOONGSON_CHIPCFG) & ~0x7, LOONGSON_CHIPCFG);
+ /* Restore CPU state */
+ writel(cpu_freq, LOONGSON_CHIPCFG);
+ spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+ local_irq_enable();
+}
+
+static int __init cpufreq_init(void)
+{
+ int ret;
+
+ /* Register platform stuff */
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ return ret;
+
+ pr_info("Loongson-2F CPU frequency driver\n");
+
+ cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
+
+ if (!ret && !nowait) {
+ saved_cpu_wait = cpu_wait;
+ cpu_wait = loongson2_cpu_wait;
+ }
+
+ return ret;
+}
+
+static void __exit cpufreq_exit(void)
+{
+ if (!nowait && saved_cpu_wait)
+ cpu_wait = saved_cpu_wait;
+ cpufreq_unregister_driver(&loongson2_cpufreq_driver);
+ cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(cpufreq_init);
+module_exit(cpufreq_exit);
+
+module_param(nowait, uint, 0644);
+MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
+
+MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
+MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
new file mode 100644
index 000000000..28d346062
--- /dev/null
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011 Dmitry Eremin-Solenikov
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is, the iMac G5 and the latest single-CPU desktop.
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <linux/of_device.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64-bit register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table maple_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/* Power mode data is an array of the 32-bit PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static int maple_pmode_cur;
+
+static const u32 *maple_pmode_data;
+static int maple_pmode_max;
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int maple_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ maple_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ maple_pmode_cur = speed_mode;
+ ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int maple_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= maple_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int maple_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return maple_scom_switch_freq(index);
+}
+
+static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
+{
+ return maple_cpu_freqs[maple_pmode_cur].frequency;
+}
+
+static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
+ return 0;
+}
+
+static struct cpufreq_driver maple_cpufreq_driver = {
+ .name = "maple",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = maple_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = maple_cpufreq_target,
+ .get = maple_cpufreq_get_speed,
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init maple_cpufreq_init(void)
+{
+ struct device_node *cpunode;
+ unsigned int psize;
+ unsigned long max_freq;
+ const u32 *valp;
+ u32 pvr_hi;
+ int rc = -ENODEV;
+
+ /*
+ * Behave here like the powermac driver, which checks machine compatibility
+ * to ease merging of the two drivers in the future.
+ */
+ if (!of_machine_is_compatible("Momentum,Maple") &&
+ !of_machine_is_compatible("Momentum,Apache"))
+ return 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (cpunode == NULL) {
+ pr_err("Can't find any CPU 0 node\n");
+ goto bail_noprops;
+ }
+
+ /* Check 970FX for now */
+ /* we actually don't care on which CPU to access PVR */
+ pvr_hi = PVR_VER(mfspr(SPRN_PVR));
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ pr_err("Unsupported CPU version (%x)\n", pvr_hi);
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ /*
+ * On Maple this property is provided by PIBS in dual-processor config,
+ * not provided by PIBS in CPU0 config and also not provided by SLOF,
+ * so YMMV
+ */
+ maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
+ if (!maple_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ maple_pmode_max = psize / sizeof(u32) - 1;
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver cannot slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ goto bail_noprops;
+ max_freq = (*valp)/1000;
+ maple_cpu_freqs[0].frequency = max_freq;
+ maple_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ msleep(10);
+ maple_pmode_cur = -1;
+ maple_scom_switch_freq(maple_scom_query_freq());
+
+ pr_info("Registering Maple CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ maple_cpu_freqs[1].frequency/1000,
+ maple_cpu_freqs[0].frequency/1000,
+ maple_cpu_freqs[maple_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&maple_cpufreq_driver);
+
+bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+module_init(maple_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
new file mode 100644
index 000000000..f0e0a35c7
--- /dev/null
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/energy_model.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#define LUT_MAX_ENTRIES 32U
+#define LUT_FREQ GENMASK(11, 0)
+#define LUT_ROW_SIZE 0x4
+#define CPUFREQ_HW_STATUS BIT(0)
+#define SVS_HW_STATUS BIT(1)
+#define POLL_USEC 1000
+#define TIMEOUT_USEC 300000
+
+enum {
+ REG_FREQ_LUT_TABLE,
+ REG_FREQ_ENABLE,
+ REG_FREQ_PERF_STATE,
+ REG_FREQ_HW_STATE,
+ REG_EM_POWER_TBL,
+ REG_FREQ_LATENCY,
+
+ REG_ARRAY_SIZE,
+};
+
+struct mtk_cpufreq_data {
+ struct cpufreq_frequency_table *table;
+ void __iomem *reg_bases[REG_ARRAY_SIZE];
+ struct resource *res;
+ void __iomem *base;
+ int nr_opp;
+};
+
+static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+};
+
+static int __maybe_unused
+mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
+ unsigned long *KHz)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ int i;
+
+ policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ for (i = 0; i < data->nr_opp; i++) {
+ if (data->table[i].frequency < *KHz)
+ break;
+ }
+ i--;
+
+ *KHz = data->table[i].frequency;
+ /* Provide micro-Watts value to the Energy Model */
+ *uW = readl_relaxed(data->reg_bases[REG_EM_POWER_TBL] +
+ i * LUT_ROW_SIZE);
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return 0;
+}
+
+static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
+{
+ struct mtk_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ index = readl_relaxed(data->reg_bases[REG_FREQ_PERF_STATE]);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return data->table[index].frequency;
+}
+
+static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+ unsigned int index;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
+
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int mtk_cpu_create_freq_table(struct platform_device *pdev,
+ struct mtk_cpufreq_data *data)
+{
+ struct device *dev = &pdev->dev;
+ u32 temp, i, freq, prev_freq = 0;
+ void __iomem *base_table;
+
+ data->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+ sizeof(*data->table), GFP_KERNEL);
+ if (!data->table)
+ return -ENOMEM;
+
+ base_table = data->reg_bases[REG_FREQ_LUT_TABLE];
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ temp = readl_relaxed(base_table + (i * LUT_ROW_SIZE));
+ freq = FIELD_GET(LUT_FREQ, temp) * 1000;
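+ /* e.g. (illustrative) a LUT field value of 2000 yields a 2000000 kHz (2 GHz) entry */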
+
+ if (freq == prev_freq)
+ break;
+
+ data->table[i].frequency = freq;
+
+ dev_dbg(dev, "index=%d freq=%d\n", i, data->table[i].frequency);
+
+ prev_freq = freq;
+ }
+
+ data->table[i].frequency = CPUFREQ_TABLE_END;
+ data->nr_opp = i;
+
+ return 0;
+}
+
+static int mtk_cpu_resources_init(struct platform_device *pdev,
+ struct cpufreq_policy *policy,
+ const u16 *offsets)
+{
+ struct mtk_cpufreq_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ int ret, i;
+ int index;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus);
+ if (index < 0)
+ return index;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (!res) {
+ dev_err(dev, "failed to get mem resource %d\n", index);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "failed to request resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+ ret = -ENOMEM;
+ goto release_region;
+ }
+
+ data->base = base;
+ data->res = res;
+
+ for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
+ data->reg_bases[i] = base + offsets[i];
+
+ ret = mtk_cpu_create_freq_table(pdev, data);
+ if (ret) {
+ dev_info(dev, "Domain-%d failed to create freq table\n", index);
+ goto unmap_base;
+ }
+
+ policy->freq_table = data->table;
+ policy->driver_data = data;
+
+ return 0;
+unmap_base:
+ iounmap(base);
+release_region:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
+}
+
+static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
+ struct mtk_cpufreq_data *data;
+ unsigned int latency;
+ int ret;
+
+ /* Get the bases of cpufreq for domains */
+ ret = mtk_cpu_resources_init(pdev, policy, platform_get_drvdata(pdev));
+ if (ret) {
+ dev_info(&pdev->dev, "CPUFreq resource init failed\n");
+ return ret;
+ }
+
+ data = policy->driver_data;
+
+ latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+ policy->fast_switch_possible = true;
+
+ /* HW should be in enabled state to proceed now */
+ writel_relaxed(0x1, data->reg_bases[REG_FREQ_ENABLE]);
+ if (readl_poll_timeout(data->reg_bases[REG_FREQ_HW_STATE], sig,
+ (sig & pwr_hw) == pwr_hw, POLL_USEC,
+ TIMEOUT_USEC)) {
+ if (!(sig & CPUFREQ_HW_STATUS)) {
+ pr_info("cpufreq hardware of CPU%d is not enabled\n",
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ pr_info("SVS of CPU%d is not enabled\n", policy->cpu);
+ }
+
+ return 0;
+}
+
+static int mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_data *data = policy->driver_data;
+ struct resource *res = data->res;
+ void __iomem *base = data->base;
+
+ /* HW should be in paused state now */
+ writel_relaxed(0x0, data->reg_bases[REG_FREQ_ENABLE]);
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
+ struct mtk_cpufreq_data *data = policy->driver_data;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
+ &em_cb, policy->cpus, true);
+}
+
+static struct cpufreq_driver cpufreq_mtk_hw_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_hw_target_index,
+ .get = mtk_cpufreq_hw_get,
+ .init = mtk_cpufreq_hw_cpu_init,
+ .exit = mtk_cpufreq_hw_cpu_exit,
+ .register_em = mtk_cpufreq_register_em,
+ .fast_switch = mtk_cpufreq_hw_fast_switch,
+ .name = "mtk-cpufreq-hw",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ const void *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, (void *) data);
+ cpufreq_mtk_hw_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+
+ return ret;
+}
+
+static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+}
+
+static const struct of_device_id mtk_cpufreq_hw_match[] = {
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ {}
+};
+
+static struct platform_driver mtk_cpufreq_hw_driver = {
+ .probe = mtk_cpufreq_hw_driver_probe,
+ .remove = mtk_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "mtk-cpufreq-hw",
+ .of_match_table = mtk_cpufreq_hw_match,
+ },
+};
+module_platform_driver(mtk_cpufreq_hw_driver);
+
+MODULE_AUTHOR("Hector Yuan <hector.yuan@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek cpufreq-hw driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
new file mode 100644
index 000000000..fef68cb2b
--- /dev/null
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+
+struct mtk_cpufreq_platform_data {
+ int min_volt_shift;
+ int max_volt_shift;
+ int proc_max_volt;
+ int sram_min_volt;
+ int sram_max_volt;
+ bool ccifreq_supported;
+};
+
+/*
+ * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
+ * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
+ * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
+ * voltage inputs need to be controlled under a hardware limitation:
+ * 100mV < Vsram - Vproc < 200mV
+ *
+ * When scaling the clock frequency of a CPU clock domain, the clock source
+ * needs to be switched to another stable PLL clock temporarily until
+ * the original PLL becomes stable at target frequency.
+ */
+struct mtk_cpu_dvfs_info {
+ struct cpumask cpus;
+ struct device *cpu_dev;
+ struct device *cci_dev;
+ struct regulator *proc_reg;
+ struct regulator *sram_reg;
+ struct clk *cpu_clk;
+ struct clk *inter_clk;
+ struct list_head list_head;
+ int intermediate_voltage;
+ bool need_voltage_tracking;
+ int vproc_on_boot;
+ int pre_vproc;
+ /* Avoid race condition for regulators between notify and policy */
+ struct mutex reg_lock;
+ struct notifier_block opp_nb;
+ unsigned int opp_cpu;
+ unsigned long current_freq;
+ const struct mtk_cpufreq_platform_data *soc_data;
+ int vtrack_max;
+ bool ccifreq_bound;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+static LIST_HEAD(dvfs_info_list);
+
+static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
+{
+ struct mtk_cpu_dvfs_info *info;
+
+ list_for_each_entry(info, &dvfs_info_list, list_head) {
+ if (cpumask_test_cpu(cpu, &info->cpus))
+ return info;
+ }
+
+ return NULL;
+}
+
+static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
+ int new_vproc)
+{
+ const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
+ struct regulator *proc_reg = info->proc_reg;
+ struct regulator *sram_reg = info->sram_reg;
+ int pre_vproc, pre_vsram, new_vsram, vsram, vproc, ret;
+ int retry = info->vtrack_max;
+
+ pre_vproc = regulator_get_voltage(proc_reg);
+ if (pre_vproc < 0) {
+ dev_err(info->cpu_dev,
+ "invalid Vproc value: %d\n", pre_vproc);
+ return pre_vproc;
+ }
+
+ pre_vsram = regulator_get_voltage(sram_reg);
+ if (pre_vsram < 0) {
+ dev_err(info->cpu_dev, "invalid Vsram value: %d\n", pre_vsram);
+ return pre_vsram;
+ }
+
+ new_vsram = clamp(new_vproc + soc_data->min_volt_shift,
+ soc_data->sram_min_volt, soc_data->sram_max_volt);
+
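+ /*
+ * Illustrative ramp (assuming min_volt_shift = 100 mV, max_volt_shift =
+ * 200 mV and wide enough Vsram limits): raising Vproc from 1000 mV to
+ * 1150 mV (new_vsram = 1250 mV) steps as Vsram -> 1200 mV,
+ * Vproc -> 1100 mV, Vsram -> 1250 mV, Vproc -> 1150 mV, so
+ * Vsram - Vproc never exceeds max_volt_shift at any point.
+ */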
+ do {
+ if (pre_vproc <= new_vproc) {
+ vsram = clamp(pre_vproc + soc_data->max_volt_shift,
+ soc_data->sram_min_volt, new_vsram);
+ ret = regulator_set_voltage(sram_reg, vsram,
+ soc_data->sram_max_volt);
+
+ if (ret)
+ return ret;
+
+ if (vsram == soc_data->sram_max_volt ||
+ new_vsram == soc_data->sram_min_volt)
+ vproc = new_vproc;
+ else
+ vproc = vsram - soc_data->min_volt_shift;
+
+ ret = regulator_set_voltage(proc_reg, vproc,
+ soc_data->proc_max_volt);
+ if (ret) {
+ regulator_set_voltage(sram_reg, pre_vsram,
+ soc_data->sram_max_volt);
+ return ret;
+ }
+ } else if (pre_vproc > new_vproc) {
+ vproc = max(new_vproc,
+ pre_vsram - soc_data->max_volt_shift);
+ ret = regulator_set_voltage(proc_reg, vproc,
+ soc_data->proc_max_volt);
+ if (ret)
+ return ret;
+
+ if (vproc == new_vproc)
+ vsram = new_vsram;
+ else
+ vsram = max(new_vsram,
+ vproc + soc_data->min_volt_shift);
+
+ ret = regulator_set_voltage(sram_reg, vsram,
+ soc_data->sram_max_volt);
+ if (ret) {
+ regulator_set_voltage(proc_reg, pre_vproc,
+ soc_data->proc_max_volt);
+ return ret;
+ }
+ }
+
+ pre_vproc = vproc;
+ pre_vsram = vsram;
+
+ if (--retry < 0) {
+ dev_err(info->cpu_dev,
+ "over loop count, failed to set voltage\n");
+ return -EINVAL;
+ }
+ } while (vproc != new_vproc || vsram != new_vsram);
+
+ return 0;
+}
+
+static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
+{
+ const struct mtk_cpufreq_platform_data *soc_data = info->soc_data;
+ int ret;
+
+ if (info->need_voltage_tracking)
+ ret = mtk_cpufreq_voltage_tracking(info, vproc);
+ else
+ ret = regulator_set_voltage(info->proc_reg, vproc,
+ soc_data->proc_max_volt);
+ if (!ret)
+ info->pre_vproc = vproc;
+
+ return ret;
+}
+
+static bool is_ccifreq_ready(struct mtk_cpu_dvfs_info *info)
+{
+ struct device_link *sup_link;
+
+ if (info->ccifreq_bound)
+ return true;
+
+ sup_link = device_link_add(info->cpu_dev, info->cci_dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!sup_link) {
+ dev_err(info->cpu_dev, "cpu%d: sup_link is NULL\n", info->opp_cpu);
+ return false;
+ }
+
+ if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return false;
+
+ info->ccifreq_bound = true;
+
+ return true;
+}
+
+static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
+ struct clk *cpu_clk = policy->clk;
+ struct clk *armpll = clk_get_parent(cpu_clk);
+ struct mtk_cpu_dvfs_info *info = policy->driver_data;
+ struct device *cpu_dev = info->cpu_dev;
+ struct dev_pm_opp *opp;
+ long freq_hz, pre_freq_hz;
+ int vproc, pre_vproc, inter_vproc, target_vproc, ret;
+
+ inter_vproc = info->intermediate_voltage;
+
+ pre_freq_hz = clk_get_rate(cpu_clk);
+
+ mutex_lock(&info->reg_lock);
+
+ if (unlikely(info->pre_vproc <= 0))
+ pre_vproc = regulator_get_voltage(info->proc_reg);
+ else
+ pre_vproc = info->pre_vproc;
+
+ if (pre_vproc < 0) {
+ dev_err(cpu_dev, "invalid Vproc value: %d\n", pre_vproc);
+ ret = pre_vproc;
+ goto out;
+ }
+
+ freq_hz = freq_table[index].frequency * 1000;
+
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ dev_err(cpu_dev, "cpu%d: failed to find OPP for %ld\n",
+ policy->cpu, freq_hz);
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ vproc = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ /*
+ * If MediaTek cci is supported but not yet ready, use
+ * max(target cpu voltage, booting voltage) to prevent a
+ * high-frequency, low-voltage crash.
+ */
+ if (info->soc_data->ccifreq_supported && !is_ccifreq_ready(info))
+ vproc = max(vproc, info->vproc_on_boot);
+
+ /*
+ * If the new voltage or the intermediate voltage is higher than the
+ * current voltage, scale up voltage first.
+ */
+ target_vproc = max(inter_vproc, vproc);
+ if (pre_vproc <= target_vproc) {
+ ret = mtk_cpufreq_set_voltage(info, target_vproc);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale up voltage!\n", policy->cpu);
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
+ }
+ }
+
+ /* Reparent the CPU clock to intermediate clock. */
+ ret = clk_set_parent(cpu_clk, info->inter_clk);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
+ }
+
+ /* Set the original PLL to target rate. */
+ ret = clk_set_rate(armpll, freq_hz);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale cpu clock rate!\n", policy->cpu);
+ clk_set_parent(cpu_clk, armpll);
+ mtk_cpufreq_set_voltage(info, pre_vproc);
+ goto out;
+ }
+
+ /* Set parent of CPU clock back to the original PLL. */
+ ret = clk_set_parent(cpu_clk, armpll);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to re-parent cpu clock!\n", policy->cpu);
+ mtk_cpufreq_set_voltage(info, inter_vproc);
+ goto out;
+ }
+
+ /*
+ * If the new voltage is lower than the intermediate voltage or the
+ * original voltage, scale down to the new voltage.
+ */
+ if (vproc < inter_vproc || vproc < pre_vproc) {
+ ret = mtk_cpufreq_set_voltage(info, vproc);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to scale down voltage!\n", policy->cpu);
+ clk_set_parent(cpu_clk, info->inter_clk);
+ clk_set_rate(armpll, pre_freq_hz);
+ clk_set_parent(cpu_clk, armpll);
+ goto out;
+ }
+ }
+
+ info->current_freq = freq_hz;
+
+out:
+ mutex_unlock(&info->reg_lock);
+
+ return ret;
+}
+
+#define DYNAMIC_POWER "dynamic-power-coefficient"
+
+static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct dev_pm_opp *new_opp;
+ struct mtk_cpu_dvfs_info *info;
+ unsigned long freq, volt;
+ struct cpufreq_policy *policy;
+ int ret = 0;
+
+ info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
+
+ if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+ mutex_lock(&info->reg_lock);
+ if (info->current_freq == freq) {
+ volt = dev_pm_opp_get_voltage(opp);
+ ret = mtk_cpufreq_set_voltage(info, volt);
+ if (ret)
+ dev_err(info->cpu_dev,
+ "failed to scale voltage: %d\n", ret);
+ }
+ mutex_unlock(&info->reg_lock);
+ } else if (event == OPP_EVENT_DISABLE) {
+ freq = dev_pm_opp_get_freq(opp);
+
+ /* the currently used OPP item has been disabled */
+ if (info->current_freq == freq) {
+ freq = 1;
+ new_opp = dev_pm_opp_find_freq_ceil(info->cpu_dev,
+ &freq);
+ if (IS_ERR(new_opp)) {
+ dev_err(info->cpu_dev,
+ "all opp items are disabled\n");
+ ret = PTR_ERR(new_opp);
+ return notifier_from_errno(ret);
+ }
+
+ dev_pm_opp_put(new_opp);
+ policy = cpufreq_cpu_get(info->opp_cpu);
+ if (policy) {
+ cpufreq_driver_target(policy, freq / 1000,
+ CPUFREQ_RELATION_L);
+ cpufreq_cpu_put(policy);
+ }
+ }
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct device *of_get_cci(struct device *cpu_dev)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ return &pdev->dev;
+}
+
+static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+{
+ struct device *cpu_dev;
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ dev_err(cpu_dev, "failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+ info->cpu_dev = cpu_dev;
+
+ info->ccifreq_bound = false;
+ if (info->soc_data->ccifreq_supported) {
+ info->cci_dev = of_get_cci(info->cpu_dev);
+ if (IS_ERR(info->cci_dev)) {
+ ret = PTR_ERR(info->cci_dev);
+ dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
+ return -ENODEV;
+ }
+ }
+
+ info->cpu_clk = clk_get(cpu_dev, "cpu");
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ return dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get cpu clk\n", cpu);
+ }
+
+ info->inter_clk = clk_get(cpu_dev, "intermediate");
+ if (IS_ERR(info->inter_clk)) {
+ ret = PTR_ERR(info->inter_clk);
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get intermediate clk\n", cpu);
+ goto out_free_mux_clock;
+ }
+
+ info->proc_reg = regulator_get_optional(cpu_dev, "proc");
+ if (IS_ERR(info->proc_reg)) {
+ ret = PTR_ERR(info->proc_reg);
+ dev_err_probe(cpu_dev, ret,
+ "cpu%d: failed to get proc regulator\n", cpu);
+ goto out_free_inter_clock;
+ }
+
+ ret = regulator_enable(info->proc_reg);
+ if (ret) {
+ dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+ goto out_free_proc_reg;
+ }
+
+ /* Both presence and absence of sram regulator are valid cases. */
+ info->sram_reg = regulator_get_optional(cpu_dev, "sram");
+ if (IS_ERR(info->sram_reg)) {
+ ret = PTR_ERR(info->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_disable_proc_reg;
+
+ info->sram_reg = NULL;
+ } else {
+ ret = regulator_enable(info->sram_reg);
+ if (ret) {
+ dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+ goto out_free_sram_reg;
+ }
+ }
+
+ /* Get OPP-sharing information from "operating-points-v2" bindings */
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
+ if (ret) {
+ dev_err(cpu_dev,
+ "cpu%d: failed to get OPP-sharing information\n", cpu);
+ goto out_disable_sram_reg;
+ }
+
+ ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+ goto out_disable_sram_reg;
+ }
+
+ ret = clk_prepare_enable(info->cpu_clk);
+ if (ret)
+ goto out_free_opp_table;
+
+ ret = clk_prepare_enable(info->inter_clk);
+ if (ret)
+ goto out_disable_mux_clock;
+
+ if (info->soc_data->ccifreq_supported) {
+ info->vproc_on_boot = regulator_get_voltage(info->proc_reg);
+ if (info->vproc_on_boot < 0) {
+ ret = info->vproc_on_boot;
+ dev_err(info->cpu_dev,
+ "invalid Vproc value: %d\n", info->vproc_on_boot);
+ goto out_disable_inter_clock;
+ }
+ }
+
+ /* Search a safe voltage for intermediate frequency. */
+ rate = clk_get_rate(info->inter_clk);
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu);
+ ret = PTR_ERR(opp);
+ goto out_disable_inter_clock;
+ }
+ info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ mutex_init(&info->reg_lock);
+ info->current_freq = clk_get_rate(info->cpu_clk);
+
+ info->opp_cpu = cpu;
+ info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier;
+ ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb);
+ if (ret) {
+ dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu);
+ goto out_disable_inter_clock;
+ }
+
+ /*
+ * If SRAM regulator is present, software "voltage tracking" is needed
+ * for this CPU power domain.
+ */
+ info->need_voltage_tracking = (info->sram_reg != NULL);
+
+ /*
+ * We assume the minimum voltage is 0 and track the target voltage in
+ * min_volt_shift steps for each iteration.
+ * vtrack_max is 3 times the expected iteration count.
+ */
+ info->vtrack_max = 3 * DIV_ROUND_UP(max(info->soc_data->sram_max_volt,
+ info->soc_data->proc_max_volt),
+ info->soc_data->min_volt_shift);
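+ /*
+ * e.g. (illustrative) proc_max_volt = sram_max_volt = 1150000 uV with
+ * min_volt_shift = 100000 uV gives vtrack_max = 3 * 12 = 36.
+ */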
+
+ return 0;
+
+out_disable_inter_clock:
+ clk_disable_unprepare(info->inter_clk);
+
+out_disable_mux_clock:
+ clk_disable_unprepare(info->cpu_clk);
+
+out_free_opp_table:
+ dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+
+out_disable_sram_reg:
+ if (info->sram_reg)
+ regulator_disable(info->sram_reg);
+
+out_free_sram_reg:
+ if (info->sram_reg)
+ regulator_put(info->sram_reg);
+
+out_disable_proc_reg:
+ regulator_disable(info->proc_reg);
+
+out_free_proc_reg:
+ regulator_put(info->proc_reg);
+
+out_free_inter_clock:
+ clk_put(info->inter_clk);
+
+out_free_mux_clock:
+ clk_put(info->cpu_clk);
+
+ return ret;
+}
+
+static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+{
+ regulator_disable(info->proc_reg);
+ regulator_put(info->proc_reg);
+ if (info->sram_reg) {
+ regulator_disable(info->sram_reg);
+ regulator_put(info->sram_reg);
+ }
+ clk_disable_unprepare(info->cpu_clk);
+ clk_put(info->cpu_clk);
+ clk_disable_unprepare(info->inter_clk);
+ clk_put(info->inter_clk);
+ dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+}
+
+static int mtk_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct mtk_cpu_dvfs_info *info;
+ struct cpufreq_frequency_table *freq_table;
+ int ret;
+
+ info = mtk_cpu_dvfs_info_lookup(policy->cpu);
+ if (!info) {
+ pr_err("dvfs info for cpu%d is not initialized.\n",
+ policy->cpu);
+ return -EINVAL;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(info->cpu_dev,
+ "failed to init cpufreq table for cpu%d: %d\n",
+ policy->cpu, ret);
+ return ret;
+ }
+
+ cpumask_copy(policy->cpus, &info->cpus);
+ policy->freq_table = freq_table;
+ policy->driver_data = info;
+ policy->clk = info->cpu_clk;
+
+ return 0;
+}
+
+static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct mtk_cpu_dvfs_info *info = policy->driver_data;
+
+ dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
+
+ return 0;
+}
+
+static struct cpufreq_driver mtk_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = mtk_cpufreq_set_target,
+ .get = cpufreq_generic_get,
+ .init = mtk_cpufreq_init,
+ .exit = mtk_cpufreq_exit,
+ .register_em = cpufreq_register_em_with_opp,
+ .name = "mtk-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int mtk_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct mtk_cpufreq_platform_data *data;
+ struct mtk_cpu_dvfs_info *info, *tmp;
+ int cpu, ret;
+
+ data = dev_get_platdata(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "failed to get mtk cpufreq platform data\n");
+ return -ENODEV;
+ }
+
+ for_each_possible_cpu(cpu) {
+ info = mtk_cpu_dvfs_info_lookup(cpu);
+ if (info)
+ continue;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto release_dvfs_info_list;
+ }
+
+ info->soc_data = data;
+ ret = mtk_cpu_dvfs_info_init(info, cpu);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize dvfs info for cpu%d\n",
+ cpu);
+ goto release_dvfs_info_list;
+ }
+
+ list_add(&info->list_head, &dvfs_info_list);
+ }
+
+ ret = cpufreq_register_driver(&mtk_cpufreq_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
+ goto release_dvfs_info_list;
+ }
+
+ return 0;
+
+release_dvfs_info_list:
+ list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
+ mtk_cpu_dvfs_info_release(info);
+ list_del(&info->list_head);
+ }
+
+ return ret;
+}
+
+static struct platform_driver mtk_cpufreq_platdrv = {
+ .driver = {
+ .name = "mtk-cpufreq",
+ },
+ .probe = mtk_cpufreq_probe,
+};
+
+static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1350000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1350000,
+ .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt7623_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1300000,
+ .ccifreq_supported = false,
+};
+
+static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1150000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1150000,
+ .ccifreq_supported = true,
+};
+
+static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 250000,
+ .proc_max_volt = 1118750,
+ .sram_min_volt = 850000,
+ .sram_max_volt = 1118750,
+ .ccifreq_supported = true,
+};
+
+static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
+ .min_volt_shift = 100000,
+ .max_volt_shift = 200000,
+ .proc_max_volt = 1310000,
+ .sram_min_volt = 0,
+ .sram_max_volt = 1310000,
+ .ccifreq_supported = false,
+};
+
+/* List of machines supported by this driver */
+static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
+ { .compatible = "mediatek,mt7623", .data = &mt7623_platform_data },
+ { .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
+ { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
+ { .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
+ { .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
+ { .compatible = "mediatek,mt8516", .data = &mt8516_platform_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
+
+static int __init mtk_cpufreq_driver_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct mtk_cpufreq_platform_data *data;
+ int err;
+
+ np = of_find_node_by_path("/");
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(mtk_cpufreq_machines, np);
+ of_node_put(np);
+ if (!match) {
+ pr_debug("Machine is not compatible with mtk-cpufreq\n");
+ return -ENODEV;
+ }
+ data = match->data;
+
+ err = platform_driver_register(&mtk_cpufreq_platdrv);
+ if (err)
+ return err;
+
+ /*
+ * Since there's no place to hold device registration code and no
+ * device-tree-based way to match the cpufreq driver yet, both the
+ * driver and the device registration code are put here to handle
+ * deferred probing.
+ */
+ cpufreq_pdev = platform_device_register_data(NULL, "mtk-cpufreq", -1,
+ data, sizeof(*data));
+ if (IS_ERR(cpufreq_pdev)) {
+ pr_err("failed to register mtk-cpufreq platform device\n");
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+ return PTR_ERR(cpufreq_pdev);
+ }
+
+ return 0;
+}
+module_init(mtk_cpufreq_driver_init)
+
+static void __exit mtk_cpufreq_driver_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&mtk_cpufreq_platdrv);
+}
+module_exit(mtk_cpufreq_driver_exit)
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
new file mode 100644
index 000000000..7f3cfe668
--- /dev/null
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPUFreq support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012-2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/resource.h>
+
+static int __init armada_xp_pmsu_cpufreq_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret, cpu;
+
+ if (!of_machine_is_compatible("marvell,armadaxp"))
+ return 0;
+
+ /*
+ * In order to have proper cpufreq handling, we need to ensure
+ * that the Device Tree description of the CPU clock includes
+ * the definition of the PMU DFS registers. If not, we do not
+ * register the clock notifier and the cpufreq driver. This
+ * piece of code is only for compatibility with old Device
+ * Trees.
+ */
+ np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
+ if (!np)
+ return 0;
+
+ ret = of_address_to_resource(np, 1, &res);
+ if (ret) {
+ pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
+ of_node_put(np);
+ return 0;
+ }
+
+ of_node_put(np);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and half of
+ * it), and registers the clock notifier that will take care
+ * of doing the PMSU part of a frequency transition.
+ */
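+ /* e.g. (illustrative) a 1600 MHz CPU clock yields OPPs at 1600 MHz and 800 MHz */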
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ return PTR_ERR(clk);
+ }
+
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
+ if (ret) {
+ dev_pm_opp_remove(cpu_dev, clk_get_rate(clk));
+ clk_put(clk);
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
+ cpumask_of(cpu_dev->id));
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ clk_put(clk);
+ }
+
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ return 0;
+}
+device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
new file mode 100644
index 000000000..1b50df06c
--- /dev/null
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pm_opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+/* OPP tolerance in percentage */
+#define OPP_TOLERANCE 4
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct device *mpu_dev;
+static struct regulator *mpu_reg;
+
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int r, ret;
+ struct dev_pm_opp *opp;
+ unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = freq_table[index].frequency;
+
+ freq = new_freq * 1000;
+ ret = clk_round_rate(policy->clk, freq);
+ if (ret < 0) {
+ dev_warn(mpu_dev,
+ "CPUfreq: Cannot find matching frequency for %lu\n",
+ freq);
+ return ret;
+ }
+ freq = ret;
+
+ if (mpu_reg) {
+ opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
+ __func__, new_freq);
+ return -EINVAL;
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+ tol = volt * OPP_TOLERANCE / 100;
+ volt_old = regulator_get_voltage(mpu_reg);
+ }
+
+ dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
+
+ /* scaling up? scale voltage before frequency */
+ if (mpu_reg && (new_freq > old_freq)) {
+ r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
+ if (r < 0) {
+ dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
+ __func__);
+ return r;
+ }
+ }
+
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
+
+ /* scaling down? scale voltage after frequency */
+ if (mpu_reg && (new_freq < old_freq)) {
+ r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
+ if (r < 0) {
+ dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
+ __func__);
+ clk_set_rate(policy->clk, old_freq * 1000);
+ return r;
+ }
+ }
+
+ return ret;
+}
+
+static inline void freq_table_free(void)
+{
+ if (atomic_dec_and_test(&freq_table_users))
+ dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+static int omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ policy->clk = clk_get(NULL, "cpufreq_ck");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
+
+ if (!freq_table) {
+ result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+ if (result) {
+ dev_err(mpu_dev,
+ "%s: cpu%d: failed creating freq table[%d]\n",
+ __func__, policy->cpu, result);
+ clk_put(policy->clk);
+ return result;
+ }
+ }
+
+ atomic_inc_return(&freq_table_users);
+
+ /* FIXME: what's the actual transition time? */
+ cpufreq_generic_init(policy, freq_table, 300 * 1000);
+
+ return 0;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ freq_table_free();
+ clk_put(policy->clk);
+ return 0;
+}
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = omap_target,
+ .get = cpufreq_generic_get,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .register_em = cpufreq_register_em_with_opp,
+ .name = "omap",
+ .attr = cpufreq_generic_attr,
+};
+
+static int omap_cpufreq_probe(struct platform_device *pdev)
+{
+ mpu_dev = get_cpu_device(0);
+ if (!mpu_dev) {
+ pr_warn("%s: unable to get the MPU device\n", __func__);
+ return -EINVAL;
+ }
+
+ mpu_reg = regulator_get(mpu_dev, "vcc");
+ if (IS_ERR(mpu_reg)) {
+ pr_warn("%s: unable to get MPU regulator\n", __func__);
+ mpu_reg = NULL;
+ } else {
+ /*
+ * Ensure physical regulator is present.
+ * (e.g. could be dummy regulator.)
+ */
+ if (regulator_get_voltage(mpu_reg) < 0) {
+ pr_warn("%s: physical regulator not present for MPU\n",
+ __func__);
+ regulator_put(mpu_reg);
+ mpu_reg = NULL;
+ }
+ }
+
+ return cpufreq_register_driver(&omap_driver);
+}
+
+static int omap_cpufreq_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&omap_driver);
+}
+
+static struct platform_driver omap_cpufreq_platdrv = {
+ .driver = {
+ .name = "omap-cpufreq",
+ },
+ .probe = omap_cpufreq_probe,
+ .remove = omap_cpufreq_remove,
+};
+module_platform_driver(omap_cpufreq_platdrv);
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
new file mode 100644
index 000000000..ef0a3216a
--- /dev/null
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
+ * (C) 2002 Tora T. Engstad
+ * All Rights Reserved
+ *
+ * The author(s) of this software shall not be held liable for damages
+ * of any nature resulting due to the use of this software. This
+ * software is provided AS-IS with no warranties.
+ *
+ * Date Errata Description
+ * 20020525 N44, O17 12.5% or 25% DC causes lockup
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/timex.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/timer.h>
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+/*
+ * Duty Cycle (3 bits); note that DC_DISABLE is not specified in the
+ * Intel docs, it is just used here to mean "modulation disabled"
+ */
+enum {
+ DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
+ DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
+};
+
+#define DC_ENTRIES 8
+
+
+static int has_N44_O17_errata[NR_CPUS];
+static unsigned int stock_freq;
+static struct cpufreq_driver p4clockmod_driver;
+static unsigned int cpufreq_p4_get(unsigned int cpu);
+
+static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
+{
+ u32 l, h;
+
+ if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
+ return -EINVAL;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
+
+ if (l & 0x01)
+ pr_debug("CPU#%d currently thermal throttled\n", cpu);
+
+ if (has_N44_O17_errata[cpu] &&
+ (newstate == DC_25PT || newstate == DC_DFLT))
+ newstate = DC_38PT;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+ if (newstate == DC_DISABLE) {
+ pr_debug("CPU#%d disabling modulation\n", cpu);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+ } else {
+ pr_debug("CPU#%d setting duty cycle to %d%%\n",
+ cpu, ((125 * newstate) / 10));
+ /* bits 63 - 5 : reserved
+ * bit 4 : enable/disable
+ * bits 3-1 : duty cycle
+ * bit 0 : reserved
+ */
+ l = (l & ~14);
+ l = l | (1<<4) | ((newstate & 0x7)<<1);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
+ }
+
+ return 0;
+}
+
+
+static struct cpufreq_frequency_table p4clockmod_table[] = {
+ {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+ {0, DC_DFLT, 0},
+ {0, DC_25PT, 0},
+ {0, DC_38PT, 0},
+ {0, DC_50PT, 0},
+ {0, DC_64PT, 0},
+ {0, DC_75PT, 0},
+ {0, DC_88PT, 0},
+ {0, DC_DISABLE, 0},
+ {0, DC_RESV, CPUFREQ_TABLE_END},
+};
+
+
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int i;
+
+ /* run on each logical CPU,
+ * see section 13.15.3 of IA32 Intel Architecture Software
+ * Developer's Manual, Volume 3
+ */
+ for_each_cpu(i, policy->cpus)
+ cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
+
+ return 0;
+}
+
+
+static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+{
+ if (c->x86 == 0x06) {
+ if (cpu_has(c, X86_FEATURE_EST))
+ pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
+ switch (c->x86_model) {
+ case 0x0E: /* Core */
+ case 0x0F: /* Core Duo */
+ case 0x16: /* Celeron Core */
+ case 0x1C: /* Atom */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+ case 0x0D: /* Pentium M (Dothan) */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ fallthrough;
+ case 0x09: /* Pentium M (Banias) */
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+ }
+ }
+
+ if (c->x86 != 0xF)
+ return 0;
+
+ /* on P-4s, the TSC runs with constant frequency independent of whether
+ * throttling is active or not. */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+ pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
+ return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
+ }
+
+ return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
+}
+
+
+
+static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+ int cpuid = 0;
+ unsigned int i;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
+#endif
+
+ /* Errata workaround */
+ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
+ switch (cpuid) {
+ case 0x0f07:
+ case 0x0f0a:
+ case 0x0f11:
+ case 0x0f12:
+ has_N44_O17_errata[policy->cpu] = 1;
+ pr_debug("has errata -- disabling low frequencies\n");
+ }
+
+ if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
+ c->x86_model < 2) {
+ /* switch to maximum frequency and measure result */
+ cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
+ recalibrate_cpu_khz();
+ }
+ /* get max frequency */
+ stock_freq = cpufreq_p4_get_frequency(c);
+ if (!stock_freq)
+ return -EINVAL;
+
+ /* table init */
+ for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
+ p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ p4clockmod_table[i].frequency = (stock_freq * i)/8;
+ }
+
+ /* cpuinfo and default policy values */
+
+ /* the transition latency is set to be 1 higher than the maximum
+ * transition latency of the ondemand governor */
+ policy->cpuinfo.transition_latency = 10000001;
+ policy->freq_table = &p4clockmod_table[0];
+
+ return 0;
+}
+
+
+static unsigned int cpufreq_p4_get(unsigned int cpu)
+{
+ u32 l, h;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+
+ if (l & 0x10) {
+ l = l >> 1;
+ l &= 0x7;
+ } else
+ l = DC_DISABLE;
+
+ if (l != DC_DISABLE)
+ return stock_freq * l / 8;
+
+ return stock_freq;
+}
+
+static struct cpufreq_driver p4clockmod_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpufreq_p4_target,
+ .init = cpufreq_p4_cpu_init,
+ .get = cpufreq_p4_get,
+ .name = "p4-clockmod",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id cpufreq_p4_id[] = {
+ X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL),
+ {}
+};
+
+/*
+ * Intentionally no MODULE_DEVICE_TABLE here: this driver should not
+ * be auto loaded. Please don't add one.
+ */
+
+static int __init cpufreq_p4_init(void)
+{
+ int ret;
+
+ /*
+ * THERM_CONTROL is architectural for IA32 now, so
+ * we can rely on the capability checks
+ */
+ if (!x86_match_cpu(cpufreq_p4_id) || !boot_cpu_has(X86_FEATURE_ACPI))
+ return -ENODEV;
+
+ ret = cpufreq_register_driver(&p4clockmod_driver);
+ if (!ret)
+ pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
+
+ return ret;
+}
+
+
+static void __exit cpufreq_p4_exit(void)
+{
+ cpufreq_unregister_driver(&p4clockmod_driver);
+}
+
+
+MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
+MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
+MODULE_LICENSE("GPL");
+
+late_initcall(cpufreq_p4_init);
+module_exit(cpufreq_p4_exit);
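
For reference, a minimal standalone sketch, not part of the patch, of the IA32_THERM_CONTROL arithmetic that cpufreq_p4_setdc() and cpufreq_p4_get() rely on: bits 3:1 of the MSR low word carry the duty-cycle step, bit 4 enables modulation, and the effective frequency is stock_freq * step / 8, which is also how the frequency table above is filled in. The 2.4 GHz stock frequency is an assumed example value.

#include <stdio.h>

/* Mirror of the MSR update in cpufreq_p4_setdc(): clear bits 3:1, then set
 * the enable bit and the new 3-bit duty-cycle step. */
static unsigned int encode_therm_control(unsigned int msr_low, unsigned int step)
{
	msr_low &= ~0xeU;
	return msr_low | (1U << 4) | ((step & 0x7U) << 1);
}

/* Mirror of cpufreq_p4_get(): if modulation is enabled, scale the stock
 * frequency by step/8, otherwise report the stock frequency. */
static unsigned int effective_khz(unsigned int stock_khz, unsigned int msr_low)
{
	unsigned int step = (msr_low & (1U << 4)) ? (msr_low >> 1) & 0x7U : 8;

	return stock_khz * step / 8;
}

int main(void)
{
	unsigned int stock_khz = 2400000;		/* assumed 2.4 GHz stock frequency */
	unsigned int low = encode_therm_control(0, 4);	/* DC_50PT */

	printf("MSR low word 0x%x -> %u kHz\n", low, effective_khz(stock_khz, low));
	return 0;
}
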
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
new file mode 100644
index 000000000..039a66bbe
--- /dev/null
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Authors: Egor Martovetsky <egor@pasemi.com>
+ * Olof Johansson <olof@lixom.net>
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/smp.h>
+
+#include <platforms/pasemi/pasemi.h>
+
+#define SDCASR_REG 0x0100
+#define SDCASR_REG_STRIDE 0x1000
+#define SDCPWR_CFGA0_REG 0x0100
+#define SDCPWR_PWST0_REG 0x0000
+#define SDCPWR_GIZTIME_REG 0x0440
+
+/* SDCPWR_GIZTIME_REG fields */
+#define SDCPWR_GIZTIME_GR 0x80000000
+#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
+
+/* Offset of ASR registers from SDC base */
+#define SDCASR_OFFSET 0x120000
+
+static void __iomem *sdcpwr_mapbase;
+static void __iomem *sdcasr_mapbase;
+
+/* Current astate; used when waking up from power savings on
+ * one core, in case the other core has switched states during
+ * the idle time.
+ */
+static int current_astate;
+
+/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
+static struct cpufreq_frequency_table pas_freqs[] = {
+ {0, 0, 0},
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_astate_freq(int astate)
+{
+ u32 ret;
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
+
+ return ret & 0x3f;
+}
+
+static int get_cur_astate(int cpu)
+{
+ u32 ret;
+
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
+ ret = (ret >> (cpu * 4)) & 0x7;
+
+ return ret;
+}
+
+static int get_gizmo_latency(void)
+{
+ u32 giztime, ret;
+
+ giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
+
+ /* just provide the upper bound */
+ if (giztime & SDCPWR_GIZTIME_GR)
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
+ else
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
+
+ return ret;
+}
+
+static void set_astate(int cpu, unsigned int astate)
+{
+ unsigned long flags;
+
+ /* Return if called before init has run */
+ if (unlikely(!sdcasr_mapbase))
+ return;
+
+ local_irq_save(flags);
+
+ out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
+
+ local_irq_restore(flags);
+}
+
+int check_astate(void)
+{
+ return get_cur_astate(hard_smp_processor_id());
+}
+
+void restore_astate(int cpu)
+{
+ set_astate(cpu, current_astate);
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ const u32 *max_freqp;
+ u32 max_freq;
+ int cur_astate, idx;
+ struct resource res;
+ struct device_node *cpu, *dn;
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+ if (!cpu)
+ goto out;
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+ if (!max_freqp) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-sdc");
+ if (!dn)
+ goto out;
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out;
+ sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
+ if (!sdcasr_mapbase) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-gizmo");
+ if (!dn) {
+ err = -ENODEV;
+ goto out_unmap_sdcasr;
+ }
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out_unmap_sdcasr;
+ sdcpwr_mapbase = ioremap(res.start, 0x1000);
+ if (!sdcpwr_mapbase) {
+ err = -EINVAL;
+ goto out_unmap_sdcasr;
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ cpufreq_for_each_entry_idx(pos, pas_freqs, idx) {
+ pos->frequency = get_astate_freq(pos->driver_data) * 100000;
+ pr_debug("%d: %d\n", idx, pos->frequency);
+ }
+
+ cur_astate = get_cur_astate(policy->cpu);
+ pr_debug("current astate is at %d\n",cur_astate);
+
+ policy->cur = pas_freqs[cur_astate].frequency;
+ ppc_proc_freq = policy->cur * 1000ul;
+
+ cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+ return 0;
+
+out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+out:
+ return err;
+}
+
+static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ /*
+ * We don't support CPU hotplug. Don't unmap after the system
+ * has already made it to a running state.
+ */
+ if (system_state >= SYSTEM_RUNNING)
+ return 0;
+
+ if (sdcasr_mapbase)
+ iounmap(sdcasr_mapbase);
+ if (sdcpwr_mapbase)
+ iounmap(sdcpwr_mapbase);
+
+ return 0;
+}
+
+static int pas_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int pas_astate_new)
+{
+ int i;
+
+ pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+ policy->cpu,
+ pas_freqs[pas_astate_new].frequency,
+ pas_freqs[pas_astate_new].driver_data);
+
+ current_astate = pas_astate_new;
+
+ for_each_online_cpu(i)
+ set_astate(i, pas_astate_new);
+
+ ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
+ return 0;
+}
+
+static struct cpufreq_driver pas_cpufreq_driver = {
+ .name = "pas-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = pas_cpufreq_cpu_init,
+ .exit = pas_cpufreq_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pas_cpufreq_target,
+ .attr = cpufreq_generic_attr,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init pas_cpufreq_init(void)
+{
+ if (!of_machine_is_compatible("PA6T-1682M") &&
+ !of_machine_is_compatible("pasemi,pwrficient"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&pas_cpufreq_driver);
+}
+
+static void __exit pas_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pas_cpufreq_driver);
+}
+
+module_init(pas_cpufreq_init);
+module_exit(pas_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
new file mode 100644
index 000000000..9f3fc7a07
--- /dev/null
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -0,0 +1,632 @@
+/*
+ * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
+ *
+ * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
+ * INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+
+#define PCC_VERSION "1.10.00"
+#define POLL_LOOPS 300
+
+#define CMD_COMPLETE 0x1
+#define CMD_GET_FREQ 0x0
+#define CMD_SET_FREQ 0x1
+
+#define BUF_SZ 4
+
+struct pcc_register_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__ ((packed));
+
+struct pcc_memory_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 resource_usage;
+ u8 type_specific;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+} __attribute__ ((packed));
+
+static struct cpufreq_driver pcc_cpufreq_driver;
+
+struct pcc_header {
+ u32 signature;
+ u16 length;
+ u8 major;
+ u8 minor;
+ u32 features;
+ u16 command;
+ u16 status;
+ u32 latency;
+ u32 minimum_time;
+ u32 maximum_time;
+ u32 nominal;
+ u32 throttled_frequency;
+ u32 minimum_frequency;
+};
+
+static void __iomem *pcch_virt_addr;
+static struct pcc_header __iomem *pcch_hdr;
+
+static DEFINE_SPINLOCK(pcc_lock);
+
+static struct acpi_generic_address doorbell;
+
+static u64 doorbell_preserve;
+static u64 doorbell_write;
+
+static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
+ 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
+
+struct pcc_cpu {
+ u32 input_offset;
+ u32 output_offset;
+};
+
+static struct pcc_cpu __percpu *pcc_cpu_info;
+
+static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static inline void pcc_cmd(void)
+{
+ u64 doorbell_value;
+ int i;
+
+ acpi_read(&doorbell_value, &doorbell);
+ acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
+ &doorbell);
+
+ for (i = 0; i < POLL_LOOPS; i++) {
+ if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
+ break;
+ }
+}
+
+static inline void pcc_clear_mapping(void)
+{
+ if (pcch_virt_addr)
+ iounmap(pcch_virt_addr);
+ pcch_virt_addr = NULL;
+}
+
+static unsigned int pcc_get_freq(unsigned int cpu)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ unsigned int curr_freq;
+ unsigned int freq_limit;
+ u16 status;
+ u32 input_buffer;
+ u32 output_buffer;
+
+ spin_lock(&pcc_lock);
+
+ pr_debug("get: get_freq for CPU %d\n", cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ input_buffer = 0x1;
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ output_buffer =
+ ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ if (status != CMD_COMPLETE) {
+ pr_debug("get: FAILED: for CPU %d, status is %d\n",
+ cpu, status);
+ goto cmd_incomplete;
+ }
+ iowrite16(0, &pcch_hdr->status);
+ curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
+ / 100) * 1000);
+
+ pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is "
+ "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n",
+ cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
+ output_buffer, curr_freq);
+
+ freq_limit = (output_buffer >> 8) & 0xff;
+ if (freq_limit != 0xff) {
+ pr_debug("get: frequency for cpu %d is being temporarily"
+ " capped at %d\n", cpu, curr_freq);
+ }
+
+ spin_unlock(&pcc_lock);
+ return curr_freq;
+
+cmd_incomplete:
+ iowrite16(0, &pcch_hdr->status);
+ spin_unlock(&pcc_lock);
+ return 0;
+}
+
+static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ struct cpufreq_freqs freqs;
+ u16 status;
+ u32 input_buffer;
+ int cpu;
+
+ cpu = policy->cpu;
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ pr_debug("target: CPU %d should go to target freq: %d "
+ "(virtual) input_offset is 0x%p\n",
+ cpu, target_freq,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ spin_lock(&pcc_lock);
+
+ input_buffer = 0x1 | (((target_freq * 100)
+ / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ iowrite16(0, &pcch_hdr->status);
+
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
+ spin_unlock(&pcc_lock);
+
+ if (status != CMD_COMPLETE) {
+ pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
+ cpu, status);
+ return -EINVAL;
+ }
+
+ pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
+
+ return 0;
+}
+
+static int pcc_get_offset(int cpu)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *pccp, *offset;
+ struct pcc_cpu *pcc_cpu_data;
+ struct acpi_processor *pr;
+ int ret = 0;
+
+ pr = per_cpu(processors, cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ if (!pr)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ pccp = buffer.pointer;
+ if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ offset = &(pccp->package.elements[0]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->input_offset = offset->integer.value;
+
+ offset = &(pccp->package.elements[1]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->output_offset = offset->integer.value;
+
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+ memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
+
+ pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data "
+ "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
+ cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
+out_free:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
+{
+ acpi_status status;
+ struct acpi_object_list input;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object in_params[4];
+ union acpi_object *out_obj;
+ u32 capabilities[2];
+ u32 errors;
+ u32 supported;
+ int ret = 0;
+
+ input.count = 4;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = 16;
+ in_params[0].buffer.pointer = OSC_UUID;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = 1;
+ in_params[2].type = ACPI_TYPE_INTEGER;
+ in_params[2].integer.value = 2;
+ in_params[3].type = ACPI_TYPE_BUFFER;
+ in_params[3].buffer.length = 8;
+ in_params[3].buffer.pointer = (u8 *)&capabilities;
+
+ capabilities[0] = OSC_QUERY_ENABLE;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ kfree(output.pointer);
+ capabilities[0] = 0x0;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_probe(void)
+{
+ acpi_status status;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct pcc_memory_resource *mem_resource;
+ struct pcc_register_resource *reg_resource;
+ union acpi_object *out_obj, *member;
+ acpi_handle handle, osc_handle;
+ int ret = 0;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!acpi_has_method(handle, "PCCH"))
+ return -ENODEV;
+
+ status = acpi_get_handle(handle, "_OSC", &osc_handle);
+ if (ACPI_SUCCESS(status)) {
+ ret = pcc_cpufreq_do_osc(&osc_handle);
+ if (ret)
+ pr_debug("probe: _OSC evaluation did not succeed\n");
+ /* Firmware's use of _OSC is optional */
+ ret = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ member = &out_obj->package.elements[0];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
+
+ pr_debug("probe: mem_resource descriptor: 0x%x,"
+ " length: %d, space_id: %d, resource_usage: %d,"
+ " type_specific: %d, granularity: 0x%llx,"
+ " minimum: 0x%llx, maximum: 0x%llx,"
+ " translation_offset: 0x%llx, address_length: 0x%llx\n",
+ mem_resource->descriptor, mem_resource->length,
+ mem_resource->space_id, mem_resource->resource_usage,
+ mem_resource->type_specific, mem_resource->granularity,
+ mem_resource->minimum, mem_resource->maximum,
+ mem_resource->translation_offset,
+ mem_resource->address_length);
+
+ if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcch_virt_addr = ioremap(mem_resource->minimum,
+ mem_resource->address_length);
+ if (pcch_virt_addr == NULL) {
+ pr_debug("probe: could not map shared mem region\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ pcch_hdr = pcch_virt_addr;
+
+ pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
+ pr_debug("probe: PCCH header is at physical address: 0x%llx,"
+ " signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
+ " supported features: 0x%x, command field: 0x%x,"
+ " status field: 0x%x, nominal latency: %d us\n",
+ mem_resource->minimum, ioread32(&pcch_hdr->signature),
+ ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
+ ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
+ ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
+ ioread32(&pcch_hdr->latency));
+
+ pr_debug("probe: min time between commands: %d us,"
+ " max time between commands: %d us,"
+ " nominal CPU frequency: %d MHz,"
+ " minimum CPU frequency: %d MHz,"
+ " minimum CPU frequency without throttling: %d MHz\n",
+ ioread32(&pcch_hdr->minimum_time),
+ ioread32(&pcch_hdr->maximum_time),
+ ioread32(&pcch_hdr->nominal),
+ ioread32(&pcch_hdr->throttled_frequency),
+ ioread32(&pcch_hdr->minimum_frequency));
+
+ member = &out_obj->package.elements[1];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
+
+ doorbell.space_id = reg_resource->space_id;
+ doorbell.bit_width = reg_resource->bit_width;
+ doorbell.bit_offset = reg_resource->bit_offset;
+ doorbell.access_width = 4;
+ doorbell.address = reg_resource->address;
+
+ pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
+ "bit_offset is %d, access_width is %d, address is 0x%llx\n",
+ doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
+ doorbell.access_width, reg_resource->address);
+
+ member = &out_obj->package.elements[2];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_preserve = member->integer.value;
+
+ member = &out_obj->package.elements[3];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_write = member->integer.value;
+
+ pr_debug("probe: doorbell_preserve: 0x%llx,"
+ " doorbell_write: 0x%llx\n",
+ doorbell_preserve, doorbell_write);
+
+ pcc_cpu_info = alloc_percpu(struct pcc_cpu);
+ if (!pcc_cpu_info) {
+ ret = -ENOMEM;
+ goto pcch_free;
+ }
+
+ printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
+ " limits: %d MHz, %d MHz\n", PCC_VERSION,
+ ioread32(&pcch_hdr->minimum_frequency),
+ ioread32(&pcch_hdr->nominal));
+ kfree(output.pointer);
+ return ret;
+pcch_free:
+ pcc_clear_mapping();
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned int result = 0;
+
+ if (!pcch_virt_addr) {
+ result = -1;
+ goto out;
+ }
+
+ result = pcc_get_offset(cpu);
+ if (result) {
+ pr_debug("init: PCCP evaluation failed\n");
+ goto out;
+ }
+
+ policy->max = policy->cpuinfo.max_freq =
+ ioread32(&pcch_hdr->nominal) * 1000;
+ policy->min = policy->cpuinfo.min_freq =
+ ioread32(&pcch_hdr->minimum_frequency) * 1000;
+
+ pr_debug("init: policy->max is %d, policy->min is %d\n",
+ policy->max, policy->min);
+out:
+ return result;
+}
+
+static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver pcc_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .get = pcc_get_freq,
+ .verify = pcc_cpufreq_verify,
+ .target = pcc_cpufreq_target,
+ .init = pcc_cpufreq_cpu_init,
+ .exit = pcc_cpufreq_cpu_exit,
+ .name = "pcc-cpufreq",
+};
+
+static int __init pcc_cpufreq_init(void)
+{
+ int ret;
+
+ /* Skip initialization if another cpufreq driver is there. */
+ if (cpufreq_get_current_driver())
+ return -EEXIST;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ ret = pcc_cpufreq_probe();
+ if (ret) {
+ pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n");
+ return ret;
+ }
+
+ if (num_present_cpus() > 4) {
+ pcc_cpufreq_driver.flags |= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
+ pr_err("%s: Too many CPUs, dynamic performance scaling disabled\n",
+ __func__);
+ pr_err("%s: Try to enable another scaling driver through BIOS settings\n",
+ __func__);
+ pr_err("%s: and complain to the system vendor\n", __func__);
+ }
+
+ ret = cpufreq_register_driver(&pcc_cpufreq_driver);
+
+ return ret;
+}
+
+static void __exit pcc_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pcc_cpufreq_driver);
+
+ pcc_clear_mapping();
+
+ free_percpu(pcc_cpu_info);
+}
+
+static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
+MODULE_VERSION(PCC_VERSION);
+MODULE_DESCRIPTION("Processor Clocking Control interface driver");
+MODULE_LICENSE("GPL");
+
+late_initcall(pcc_cpufreq_init);
+module_exit(pcc_cpufreq_exit);
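
As a reading aid, a minimal standalone sketch, not part of the patch, of the PCC mailbox word layout used by pcc_get_freq() and pcc_cpufreq_target() above: bit 0 of the input word marks the command as valid, bits 15:8 carry the requested frequency as a percentage of the nominal frequency, and the low byte of the output word reports the granted percentage. The 2933 MHz nominal frequency is an assumed example value.

#include <stdio.h>

/* Mirror of the input word built in pcc_cpufreq_target():
 * 0x1 | ((target_khz * 100 / (nominal_mhz * 1000)) << 8) */
static unsigned int encode_set_freq(unsigned int target_khz, unsigned int nominal_mhz)
{
	return 0x1U | (((target_khz * 100U) / (nominal_mhz * 1000U)) << 8);
}

/* Mirror of the frequency decode in pcc_get_freq(), result in kHz. */
static unsigned int decode_get_freq(unsigned int output, unsigned int nominal_mhz)
{
	return ((nominal_mhz * (output & 0xffU)) / 100U) * 1000U;
}

int main(void)
{
	unsigned int nominal_mhz = 2933;	/* assumed nominal frequency */
	unsigned int in = encode_set_freq(1466500, nominal_mhz);

	printf("input word 0x%x requests %u%% of nominal\n", in, (in >> 8) & 0xff);
	printf("output byte 0x32 decodes to %u kHz\n",
	       decode_get_freq(0x32, nominal_mhz));
	return 0;
}
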
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
new file mode 100644
index 000000000..4b8ee2014
--- /dev/null
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -0,0 +1,693 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different type of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include <linux/of_device.h>
+
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+#include <asm/switch_to.h>
+
+/* WARNING !!! This will cause calibrate_delay() to be called,
+ * but this is an __init function ! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ
+ */
+#undef DEBUG_FREQ
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+static unsigned long transition_latency;
+
+/*
+ * Different models use different mechanisms to switch the frequency
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static inline void local_delay(unsigned long ms)
+{
+ if (no_schedule)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+#ifdef DEBUG_FREQ
+static inline void debug_calc_bogomips(void)
+{
+ /* This will cause a recalc of bogomips and display the
+ * result. We backup/restore the value to avoid affecting the
+ * core cpufreq framework's own calculation.
+ */
+ unsigned long save_lpj = loops_per_jiffy;
+ calibrate_delay();
+ loops_per_jiffy = save_lpj;
+}
+#endif /* DEBUG_FREQ */
+
+/* Switch CPU speed under 750FX CPU control
+ */
+static int cpu_750fx_cpu_speed(int low_speed)
+{
+ u32 hid2;
+
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(10);
+
+ /* tweak L2 for high voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 &= ~0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+ }
+#ifdef CONFIG_PPC_BOOK3S_32
+ low_choose_750fx_pll(low_speed);
+#endif
+ if (low_speed == 1) {
+ /* tweak L2 for low voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 |= 0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(10);
+ }
+
+ return 0;
+}
+
+static unsigned int cpu_750fx_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_PS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+/* Switch CPU speed using DFS */
+static int dfs_set_cpu_speed(int low_speed)
+{
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(1);
+ }
+
+ /* set frequency */
+#ifdef CONFIG_PPC_BOOK3S_32
+ low_choose_7447a_dfs(low_speed);
+#endif
+ udelay(100);
+
+ if (low_speed == 1) {
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(1);
+ }
+
+ return 0;
+}
+
+static unsigned int dfs_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_DFS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+
+/* Switch CPU speed using slewing GPIOs
+ */
+static int gpios_set_cpu_speed(int low_speed)
+{
+ int gpio, timeout = 0;
+
+ /* If ramping up, set voltage first */
+ if (low_speed == 0) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Delay is way too big but it's ok, we schedule */
+ local_delay(10);
+ }
+
+ /* Set frequency */
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ if (low_speed == ((gpio & 0x01) == 0))
+ goto skip;
+
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
+ low_speed ? 0x04 : 0x05);
+ udelay(200);
+ do {
+ if (++timeout > 100)
+ break;
+ local_delay(1);
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+ } while((gpio & 0x02) == 0);
+ skip:
+ /* If ramping down, set voltage last */
+ if (low_speed == 1) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ /* Delay is way too big but it's ok, we schedule */
+ local_delay(10);
+ }
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ return 0;
+}
+
+/* Switch CPU speed under PMU control
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+ struct adb_request req;
+ unsigned long save_l2cr;
+ unsigned long save_l3cr;
+ unsigned int pic_prio;
+ unsigned long flags;
+
+ preempt_disable();
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+ pmu_suspend();
+
+ /* Disable all interrupt sources on openpic */
+ pic_prio = mpic_cpu_get_priority();
+ mpic_cpu_set_priority(0xf);
+
+ /* Make sure the decrementer won't interrupt us */
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+ /* Make sure any pending DEC interrupt occurring while we did
+ * the above didn't re-enable the DEC */
+ mb();
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+ /* We can now disable MSR_EE */
+ local_irq_save(flags);
+
+ /* Give up the FPU & vec */
+ enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+ /* Save & disable L2 and L3 caches */
+ save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
+ save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
+
+ /* Send the new speed command. My assumption is that this command
+ * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
+ */
+ pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+ while (!req.complete)
+ pmu_poll();
+
+ /* Prepare the northbridge for the speed transition */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
+
+ /* Call low level code to backup CPU state and recover from
+ * hardware reset
+ */
+ low_sleep_handler();
+
+ /* Restore the northbridge */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
+
+ /* Restore L2 cache */
+ if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+ _set_L2CR(save_l2cr);
+ /* Restore L3 cache */
+ if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+ _set_L3CR(save_l3cr);
+
+ /* Restore userland MMU context */
+ switch_mmu_context(NULL, current->active_mm, NULL);
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+ /* Restore low level PMU operations */
+ pmu_unlock();
+
+ /*
+ * Restore decrementer; we'll take a decrementer interrupt
+ * as soon as interrupts are re-enabled and the generic
+ * clockevents code will reprogram it with the right value.
+ */
+ set_dec(1);
+
+ /* Restore interrupts */
+ mpic_cpu_set_priority(pic_prio);
+
+ /* Let interrupts flow again ... */
+ local_irq_restore(flags);
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ pmu_resume();
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
+{
+ unsigned long l3cr;
+ static unsigned long prev_l3cr;
+
+ if (speed_mode == CPUFREQ_LOW &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if (l3cr & L3CR_L3E) {
+ prev_l3cr = l3cr;
+ _set_L3CR(0);
+ }
+ }
+ set_speed_proc(speed_mode == CPUFREQ_LOW);
+ if (speed_mode == CPUFREQ_HIGH &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
+ _set_L3CR(prev_l3cr);
+ }
+ cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+ return 0;
+}
+
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
+{
+ return cur_freq;
+}
+
+static int pmac_cpufreq_target( struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ int rc;
+
+ rc = do_set_cpu_speed(policy, index);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+ return rc;
+}
+
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
+ return 0;
+}
+
+static u32 read_gpio(struct device_node *np)
+{
+ const u32 *reg = of_get_property(np, "reg", NULL);
+ u32 offset;
+
+ if (reg == NULL)
+ return 0;
+ /* That works for all keylargos but shall be fixed properly
+ * some day... The problem is that it seems we can't rely
+ * on the "reg" property of the GPIO nodes, they are either
+ * relative to the base of KeyLargo or to the base of the
+ * GPIO space, and the device-tree doesn't help.
+ */
+ offset = *reg;
+ if (offset < KEYLARGO_GPIO_LEVELS0)
+ offset += KEYLARGO_GPIO_LEVELS0;
+ return offset;
+}
+
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ /* Ok, this could be made a bit smarter, but let's be robust for now. We
+ * always force a speed change to high speed before sleep, to make sure
+ * we have appropriate voltage and/or bus speed for the wakeup process,
+ * and to make sure our loops_per_jiffy value is "good enough", that is,
+ * it will not cause too-short delays if we sleep at low speed and wake
+ * at high speed.
+ */
+ no_schedule = 1;
+ sleep_freq = cur_freq;
+ if (cur_freq == low_freq && !is_pmu_based)
+ do_set_cpu_speed(policy, CPUFREQ_HIGH);
+ return 0;
+}
+
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ /* If we resume, first check if we have a get() function */
+ if (get_speed_proc)
+ cur_freq = get_speed_proc();
+ else
+ cur_freq = 0;
+
+ /* We don't, hrm... we don't really know our speed here, best
+ * is that we force a switch to whatever it was, which is
+ * probably high speed due to our suspend() routine
+ */
+ do_set_cpu_speed(policy, sleep_freq == low_freq ?
+ CPUFREQ_LOW : CPUFREQ_HIGH);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ no_schedule = 0;
+ return 0;
+}
+
+static struct cpufreq_driver pmac_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pmac_cpufreq_target,
+ .get = pmac_cpufreq_get_speed,
+ .init = pmac_cpufreq_cpu_init,
+ .suspend = pmac_cpufreq_suspend,
+ .resume = pmac_cpufreq_resume,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .attr = cpufreq_generic_attr,
+ .name = "powermac",
+};
+
+
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+ "voltage-gpio");
+ struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+ "frequency-gpio");
+ struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+ "slewing-done");
+ const u32 *value;
+
+ /*
+ * Check to see if it's GPIO driven or PMU only
+ *
+ * The way we extract the GPIO address is slightly hackish, but it
+ * works well enough for now. We need to abstract the whole GPIO
+ * stuff sooner or later anyway
+ */
+
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ if (freq_gpio_np)
+ frequency_gpio = read_gpio(freq_gpio_np);
+ if (slew_done_gpio_np)
+ slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+ of_node_put(volt_gpio_np);
+ of_node_put(freq_gpio_np);
+ of_node_put(slew_done_gpio_np);
+
+ /* If we use the frequency GPIOs, calculate the min/max speeds based
+ * on the bus frequencies
+ */
+ if (frequency_gpio && slew_done_gpio) {
+ int lenp, rc;
+ const u32 *freqs, *ratio;
+
+ freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
+ lenp /= sizeof(u32);
+ if (freqs == NULL || lenp != 2) {
+ pr_err("bus-frequencies incorrect or missing\n");
+ return 1;
+ }
+ ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
+ NULL);
+ if (ratio == NULL) {
+ pr_err("processor-to-bus-ratio*2 missing\n");
+ return 1;
+ }
+
+ /* Get the min/max bus frequencies */
+ low_freq = min(freqs[0], freqs[1]);
+ hi_freq = max(freqs[0], freqs[1]);
+
+ /* Grrrr.. It _seems_ that the device-tree is lying about the low bus
+ * frequency: it claims it is around 84 MHz on some models while it
+ * appears to be approx. 101 MHz on all. Let's hack around here...
+ * fortunately, we don't need to be too precise
+ */
+ if (low_freq < 98000000)
+ low_freq = 101000000;
+
+ /* Convert those to CPU core clocks */
+ low_freq = (low_freq * (*ratio)) / 2000;
+ hi_freq = (hi_freq * (*ratio)) / 2000;
+
+ /* Now that we have the frequencies, we read the GPIO to see what our
+ * current speed is
+ */
+ rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ cur_freq = (rc & 0x01) ? hi_freq : low_freq;
+
+ set_speed_proc = gpios_set_cpu_speed;
+ return 1;
+ }
+
+ /* If we use the PMU, look for the min & max frequencies in the
+ * device-tree
+ */
+ value = of_get_property(cpunode, "min-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+ /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
+ * here */
+ if (low_freq < 100000)
+ low_freq *= 10;
+
+ value = of_get_property(cpunode, "max-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ hi_freq = (*value) / 1000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
+ if (!voltage_gpio){
+ pr_err("missing cpu-vcore-select gpio\n");
+ return 1;
+ }
+
+ /* OF only reports the high frequency */
+ hi_freq = cur_freq;
+ low_freq = cur_freq/2;
+
+ /* Read actual frequency from CPU */
+ cur_freq = dfs_get_cpu_speed();
+ set_speed_proc = dfs_set_cpu_speed;
+ get_speed_proc = dfs_get_cpu_speed;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+ u32 pvr;
+ const u32 *value;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ hi_freq = cur_freq;
+ value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+
+ of_node_put(volt_gpio_np);
+ pvr = mfspr(SPRN_PVR);
+ has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+ set_speed_proc = cpu_750fx_cpu_speed;
+ get_speed_proc = cpu_750fx_get_cpu_speed;
+ cur_freq = cpu_750fx_get_cpu_speed();
+
+ return 0;
+}
+
+/* Currently, we support the following machines:
+ *
+ * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
+ * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
+ * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
+ * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
+ * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
+ * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
+ * - Recent MacRISC3 laptops
+ * - All new machines with 7447A CPUs
+ */
+static int __init pmac_cpufreq_setup(void)
+{
+ struct device_node *cpunode;
+ const u32 *value;
+
+ if (strstr(boot_command_line, "nocpufreq"))
+ return 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (!cpunode)
+ goto out;
+
+ /* Get current cpu clock freq */
+ value = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!value)
+ goto out;
+ cur_freq = (*value) / 1000;
+
+ /* Check for 7447A based MacRISC3 */
+ if (of_machine_is_compatible("MacRISC3") &&
+ of_get_property(cpunode, "dynamic-power-step", NULL) &&
+ PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
+ pmac_cpufreq_init_7447A(cpunode);
+
+ /* Allow dynamic switching */
+ transition_latency = 8000000;
+ pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
+ /* Check for other MacRISC3 machines */
+ } else if (of_machine_is_compatible("PowerBook3,4") ||
+ of_machine_is_compatible("PowerBook3,5") ||
+ of_machine_is_compatible("MacRISC3")) {
+ pmac_cpufreq_init_MacRISC3(cpunode);
+ /* Else check for iBook2 500/600 */
+ } else if (of_machine_is_compatible("PowerBook4,1")) {
+ hi_freq = cur_freq;
+ low_freq = 400000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 550 */
+ else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+ hi_freq = cur_freq;
+ low_freq = 500000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 400 & 500 */
+ else if (of_machine_is_compatible("PowerBook3,2")) {
+ /* We only know about the 400 MHz and the 500 MHz models;
+ * they both have 300 MHz as the low frequency
+ */
+ if (cur_freq < 350000 || cur_freq > 550000)
+ goto out;
+ hi_freq = cur_freq;
+ low_freq = 300000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for 750FX */
+ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
+ pmac_cpufreq_init_750FX(cpunode);
+out:
+ of_node_put(cpunode);
+ if (set_speed_proc == NULL)
+ return -ENODEV;
+
+ pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
+ pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ pr_info("Registering PowerMac CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+ low_freq/1000, hi_freq/1000, cur_freq/1000);
+
+ return cpufreq_register_driver(&pmac_cpufreq_driver);
+}
+
+module_init(pmac_cpufreq_setup);
+
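
As a reading aid, a minimal standalone sketch, not part of the patch, of the MacRISC3 clock conversion performed in pmac_cpufreq_init_MacRISC3() above: the "bus-frequencies" property is in Hz, "processor-to-bus-ratio*2" is the doubled multiplier, and dividing their product by 2000 yields the core clock in kHz. The property values below are invented examples.

#include <stdio.h>

/* Mirror of: low_freq = (low_freq * (*ratio)) / 2000, i.e. the bus clock in
 * Hz times the doubled ratio, scaled down to kHz. */
static unsigned int core_khz(unsigned int bus_hz, unsigned int ratio_times_2)
{
	return (unsigned int)(((unsigned long long)bus_hz * ratio_times_2) / 2000);
}

int main(void)
{
	unsigned int low_bus = 101000000;	/* invented low bus clock, Hz */
	unsigned int hi_bus = 133000000;	/* invented high bus clock, Hz */
	unsigned int ratio2 = 15;		/* invented 7.5:1 ratio, doubled */

	printf("low core clock: %u kHz, high core clock: %u kHz\n",
	       core_khz(low_bus, ratio2), core_khz(hi_bus, ratio2));
	return 0;
}
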
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
new file mode 100644
index 000000000..ba9c31d98
--- /dev/null
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is iMac G5 and latest single CPU desktop.
+ */
+
+#undef DEBUG
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64 bits register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table g5_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/* Power mode data is an array of the 32 bits PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static int g5_pmode_cur;
+
+static void (*g5_switch_volt)(int speed_mode);
+static int (*g5_switch_freq)(int speed_mode);
+static int (*g5_query_freq)(void);
+
+static unsigned long transition_latency;
+
+#ifdef CONFIG_PMAC_SMU
+
+static const u32 *g5_pmode_data;
+static int g5_pmode_max;
+
+static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
+static int g5_fvt_count; /* number of op. points */
+static int g5_fvt_cur; /* current op. point */
+
+/*
+ * SMU based voltage switching for Neo2 platforms
+ */
+
+static void g5_smu_switch_volt(int speed_mode)
+{
+ struct smu_simple_cmd cmd;
+
+ DECLARE_COMPLETION_ONSTACK(comp);
+ smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
+ &comp, 'V', 'S', 'L', 'E', 'W',
+ 0xff, g5_fvt_cur+1, speed_mode);
+ wait_for_completion(&comp);
+}
+
+/*
+ * Platform function based voltage/vdnap switching for Neo2
+ */
+
+static struct pmf_function *pfunc_set_vdnap0;
+static struct pmf_function *pfunc_vdnap0_complete;
+
+static void g5_vdnap_switch_volt(int speed_mode)
+{
+ struct pmf_args args;
+ u32 slew, done = 0;
+ unsigned long timeout;
+
+ slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
+ args.count = 1;
+ args.u[0].p = &slew;
+
+ pmf_call_one(pfunc_set_vdnap0, &args);
+
+ /* It's an irq GPIO so we should be able to just block here,
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_vdnap0_complete, &args);
+ if (done)
+ break;
+ usleep_range(1000, 1000);
+ }
+ if (done == 0)
+ pr_warn("Timeout in clock slewing !\n");
+}
+
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int g5_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ g5_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= g5_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Fake voltage switching for platforms with missing support
+ */
+
+static void g5_dummy_switch_volt(int speed_mode)
+{
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+/*
+ * Platform function based voltage switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu0_volt_high;
+static struct pmf_function *pfunc_cpu0_volt_low;
+static struct pmf_function *pfunc_cpu1_volt_high;
+static struct pmf_function *pfunc_cpu1_volt_low;
+
+static void g5_pfunc_switch_volt(int speed_mode)
+{
+ if (speed_mode == CPUFREQ_HIGH) {
+ if (pfunc_cpu0_volt_high)
+ pmf_call_one(pfunc_cpu0_volt_high, NULL);
+ if (pfunc_cpu1_volt_high)
+ pmf_call_one(pfunc_cpu1_volt_high, NULL);
+ } else {
+ if (pfunc_cpu0_volt_low)
+ pmf_call_one(pfunc_cpu0_volt_low, NULL);
+ if (pfunc_cpu1_volt_low)
+ pmf_call_one(pfunc_cpu1_volt_low, NULL);
+ }
+	usleep_range(10000, 10000); /* should be faster, to fix */
+}
+
+/*
+ * Platform function based frequency switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu_setfreq_high;
+static struct pmf_function *pfunc_cpu_setfreq_low;
+static struct pmf_function *pfunc_cpu_getfreq;
+static struct pmf_function *pfunc_slewing_done;
+
+static int g5_pfunc_switch_freq(int speed_mode)
+{
+ struct pmf_args args;
+ u32 done = 0;
+ unsigned long timeout;
+ int rc;
+
+ DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ /* Do it */
+ if (speed_mode == CPUFREQ_HIGH)
+ rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
+ else
+ rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
+
+ if (rc)
+ pr_warn("pfunc switch error %d\n", rc);
+
+ /* It's an irq GPIO so we should be able to just block here,
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_slewing_done, &args);
+ if (done)
+ break;
+ usleep_range(500, 500);
+ }
+ if (done == 0)
+ pr_warn("Timeout in clock slewing !\n");
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_pfunc_query_freq(void)
+{
+ struct pmf_args args;
+ u32 val = 0;
+
+ args.count = 1;
+ args.u[0].p = &val;
+ pmf_call_one(pfunc_cpu_getfreq, &args);
+ return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
+}
+
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ return g5_switch_freq(index);
+}
+
+static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
+{
+ return g5_cpu_freqs[g5_pmode_cur].frequency;
+}
+
+static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
+ return 0;
+}
+
+static struct cpufreq_driver g5_cpufreq_driver = {
+ .name = "powermac",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = g5_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = g5_cpufreq_target,
+ .get = g5_cpufreq_get_speed,
+ .attr = cpufreq_generic_attr,
+};
+
+
+#ifdef CONFIG_PMAC_SMU
+
+static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
+{
+ unsigned int psize, ssize;
+ unsigned long max_freq;
+ char *freq_method, *volt_method;
+ const u32 *valp;
+ u32 pvr_hi;
+ int use_volts_vdnap = 0;
+ int use_volts_smu = 0;
+ int rc = -ENODEV;
+
+ /* Check supported platforms */
+ if (of_machine_is_compatible("PowerMac8,1") ||
+ of_machine_is_compatible("PowerMac8,2") ||
+ of_machine_is_compatible("PowerMac9,1") ||
+ of_machine_is_compatible("PowerMac12,1"))
+ use_volts_smu = 1;
+ else if (of_machine_is_compatible("PowerMac11,2"))
+ use_volts_vdnap = 1;
+ else
+ return -ENODEV;
+
+ /* Check 970FX for now */
+ valp = of_get_property(cpunode, "cpu-version", NULL);
+ if (!valp) {
+ DBG("No cpu-version property !\n");
+ goto bail_noprops;
+ }
+ pvr_hi = (*valp) >> 16;
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ pr_err("Unsupported CPU version\n");
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize);
+ if (!g5_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ g5_pmode_max = psize / sizeof(u32) - 1;
+
+ if (use_volts_smu) {
+ const struct smu_sdbp_header *shdr;
+
+ /* Look for the FVT table */
+ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ if (!shdr)
+ goto bail_noprops;
+ g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
+ ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
+ g5_fvt_count = ssize / sizeof(*g5_fvt_table);
+ g5_fvt_cur = 0;
+
+ /* Sanity checking */
+ if (g5_fvt_count < 1 || g5_pmode_max < 1)
+ goto bail_noprops;
+
+ g5_switch_volt = g5_smu_switch_volt;
+ volt_method = "SMU";
+ } else if (use_volts_vdnap) {
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ pr_err("Can't find root of device tree\n");
+ goto bail_noprops;
+ }
+ pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
+ pfunc_vdnap0_complete =
+ pmf_find_function(root, "slewing-done");
+ of_node_put(root);
+ if (pfunc_set_vdnap0 == NULL ||
+ pfunc_vdnap0_complete == NULL) {
+ pr_err("Can't find required platform function\n");
+ goto bail_noprops;
+ }
+
+ g5_switch_volt = g5_vdnap_switch_volt;
+ volt_method = "GPIO";
+ } else {
+ g5_switch_volt = g5_dummy_switch_volt;
+ volt_method = "none";
+ }
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver can not slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = max_freq/2;
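+	/* Illustrative example: a (hypothetical) 2.0 GHz machine reporting
+	 * clock-frequency = 2000000000 would get max_freq = 2000000 kHz here,
+	 * with 1000000 kHz as the half-speed point.
+	 */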
+
+ /* Set callbacks */
+ transition_latency = 12000;
+ g5_switch_freq = g5_scom_switch_freq;
+ g5_query_freq = g5_scom_query_freq;
+ freq_method = "SCOM";
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
+	pr_info("Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+
+	/* We keep the CPU node on hold... hopefully, Apple G5s don't have
+	 * hotplug CPUs with a dynamic device-tree ...
+ */
+ return rc;
+
+ bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+
+static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
+{
+ struct device_node *cpuid = NULL, *hwclock = NULL;
+ const u8 *eeprom = NULL;
+ const u32 *valp;
+ u64 max_freq, min_freq, ih, il;
+ int has_volt = 1, rc = 0;
+
+ DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
+ " RackMac3,1...\n");
+
+ /* Lookup the cpuid eeprom node */
+ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
+ if (cpuid != NULL)
+ eeprom = of_get_property(cpuid, "cpuid", NULL);
+ if (eeprom == NULL) {
+ pr_err("Can't find cpuid EEPROM !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Lookup the i2c hwclock */
+ for_each_node_by_name(hwclock, "i2c-hwclock") {
+ const char *loc = of_get_property(hwclock,
+ "hwctrl-location", NULL);
+ if (loc == NULL)
+ continue;
+ if (strcmp(loc, "CPU CLOCK"))
+ continue;
+ if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ continue;
+ break;
+ }
+ if (hwclock == NULL) {
+ pr_err("Can't find i2c clock chip !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock);
+
+ /* Now get all the platform functions */
+ pfunc_cpu_getfreq =
+ pmf_find_function(hwclock, "get-frequency");
+ pfunc_cpu_setfreq_high =
+ pmf_find_function(hwclock, "set-frequency-high");
+ pfunc_cpu_setfreq_low =
+ pmf_find_function(hwclock, "set-frequency-low");
+ pfunc_slewing_done =
+ pmf_find_function(hwclock, "slewing-done");
+ pfunc_cpu0_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-0");
+ pfunc_cpu0_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-0");
+ pfunc_cpu1_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-1");
+ pfunc_cpu1_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-1");
+
+ /* Check we have minimum requirements */
+ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
+ pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
+ pr_err("Can't find platform functions !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Check that we have complete sets */
+ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
+ has_volt = 0;
+ }
+ if (!has_volt ||
+ pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
+ }
+
+ /* Note: The device tree also contains a "platform-set-values"
+ * function for which I haven't quite figured out the usage. It
+ * might have to be called on init and/or wakeup, I'm not too sure
+ * but things seem to work fine without it so far ...
+ */
+
+ /* Get max frequency from device-tree */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp) {
+ pr_err("Can't find CPU frequency !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ max_freq = (*valp)/1000;
+
+ /* Now calculate reduced frequency by using the cpuid input freq
+	 * ratio. This requires 64-bit math unless we are willing to lose
+ * some precision
+ */
+ ih = *((u32 *)(eeprom + 0x10));
+ il = *((u32 *)(eeprom + 0x20));
+
+ /* Check for machines with no useful settings */
+ if (il == ih) {
+ pr_warn("No low frequency mode available on this model !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ min_freq = 0;
+ if (ih != 0 && il != 0)
+ min_freq = (max_freq * il) / ih;
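+	/* Purely illustrative values: with max_freq = 2000000 kHz and eeprom
+	 * words ih = 3, il = 2, this gives min_freq = 2000000 * 2 / 3,
+	 * i.e. roughly 1333333 kHz.
+	 */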
+
+ /* Sanity check */
+ if (min_freq >= max_freq || min_freq < 1000) {
+ pr_err("Can't calculate low frequency !\n");
+ rc = -ENXIO;
+ goto bail;
+ }
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = min_freq;
+
+ /* Based on a measurement on Xserve G5, rounded up. */
+ transition_latency = 10 * NSEC_PER_MSEC;
+
+ /* Set callbacks */
+ g5_switch_volt = g5_pfunc_switch_volt;
+ g5_switch_freq = g5_pfunc_switch_freq;
+ g5_query_freq = g5_pfunc_query_freq;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
+ has_volt ? "i2c/pfunc" : "none");
+	pr_info("Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+ bail:
+ if (rc != 0) {
+ pmf_put_function(pfunc_cpu_getfreq);
+ pmf_put_function(pfunc_cpu_setfreq_high);
+ pmf_put_function(pfunc_cpu_setfreq_low);
+ pmf_put_function(pfunc_slewing_done);
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ }
+ of_node_put(hwclock);
+ of_node_put(cpuid);
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+static int __init g5_cpufreq_init(void)
+{
+ struct device_node *cpunode;
+ int rc = 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (cpunode == NULL) {
+ pr_err("Can't find any CPU node\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3") ||
+ of_machine_is_compatible("RackMac3,1"))
+ rc = g5_pm72_cpufreq_init(cpunode);
+#ifdef CONFIG_PMAC_SMU
+ else
+ rc = g5_neo2_cpufreq_init(cpunode);
+#endif /* CONFIG_PMAC_SMU */
+
+ return rc;
+}
+
+module_init(g5_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
new file mode 100644
index 000000000..41eefef95
--- /dev/null
+++ b/drivers/cpufreq/powernow-k6.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
+ * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
+ * Dominik Brodowski.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/ioport.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
+#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
+ as it is unused */
+
+static unsigned int busfreq; /* FSB, in 10 kHz */
+static unsigned int max_multiplier;
+
+static unsigned int param_busfreq = 0;
+static unsigned int param_max_multiplier = 0;
+
+module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
+MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
+
+module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+static struct cpufreq_frequency_table clock_ratio[] = {
+ {0, 60, /* 110 -> 6.0x */ 0},
+ {0, 55, /* 011 -> 5.5x */ 0},
+ {0, 50, /* 001 -> 5.0x */ 0},
+ {0, 45, /* 000 -> 4.5x */ 0},
+ {0, 40, /* 010 -> 4.0x */ 0},
+ {0, 35, /* 111 -> 3.5x */ 0},
+ {0, 30, /* 101 -> 3.0x */ 0},
+ {0, 20, /* 100 -> 2.0x */ 0},
+ {0, 0, CPUFREQ_TABLE_END}
+};
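+
+/*
+ * Worked example (100 MHz front-side bus assumed): busfreq is kept in
+ * 10 kHz units, so busfreq = 10000; the 4.5x entry above (driver_data = 45)
+ * then gives a table frequency of busfreq * 45 = 450000 kHz in
+ * powernow_k6_cpu_init() below.
+ */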
+
+static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
+static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
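+
+/*
+ * The two tables above translate between an index into clock_ratio[] and
+ * the 3-bit code the hardware uses in bits 7:5 of the PowerNow! port;
+ * each table is the inverse permutation of the other.
+ */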
+
+static const struct {
+ unsigned freq;
+ unsigned mult;
+} usual_frequency_table[] = {
+ { 350000, 35 }, // 100 * 3.5
+ { 400000, 40 }, // 100 * 4
+ { 450000, 45 }, // 100 * 4.5
+ { 475000, 50 }, // 95 * 5
+ { 500000, 50 }, // 100 * 5
+ { 506250, 45 }, // 112.5 * 4.5
+ { 533500, 55 }, // 97 * 5.5
+ { 550000, 55 }, // 100 * 5.5
+ { 562500, 50 }, // 112.5 * 5
+ { 570000, 60 }, // 95 * 6
+ { 600000, 60 }, // 100 * 6
+ { 618750, 55 }, // 112.5 * 5.5
+ { 660000, 55 }, // 120 * 5.5
+ { 675000, 60 }, // 112.5 * 6
+ { 720000, 60 }, // 120 * 6
+};
+
+#define FREQ_RANGE 3000
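+
+/*
+ * For instance, a measured cpu_khz of 452000 (an illustrative value) falls
+ * within 450000 +/- FREQ_RANGE and is therefore treated as the
+ * 450 MHz / 4.5x entry of the table above.
+ */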
+
+/**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+ * Returns the current setting of the frequency multiplier (the ratio
+ * times 10, as stored in clock_ratio[]). Core clock speed is the
+ * Front-Side Bus frequency multiplied by this value divided by 10.
+ */
+static int powernow_k6_get_cpu_multiplier(void)
+{
+ unsigned long invalue = 0;
+ u32 msrval;
+
+ local_irq_disable();
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ local_irq_enable();
+
+ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
+}
+
+static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
+{
+ unsigned long outvalue, invalue;
+ unsigned long msrval;
+ unsigned long cr0;
+
+ /* we now need to transform best_i to the BVC format, see AMD#23446 */
+
+ /*
+ * The processor doesn't respond to inquiry cycles while changing the
+ * frequency, so we must disable cache.
+ */
+ local_irq_disable();
+ cr0 = read_cr0();
+ write_cr0(cr0 | X86_CR0_CD);
+ wbinvd();
+
+ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ invalue = invalue & 0x1f;
+ outvalue = outvalue | invalue;
+ outl(outvalue, (POWERNOW_IOPORT + 0x8));
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ write_cr0(cr0);
+ local_irq_enable();
+}
+
+/**
+ * powernow_k6_target - set the PowerNow! multiplier
+ * @best_i: clock_ratio[best_i] is the target multiplier
+ *
+ * Tries to change the PowerNow! multiplier
+ */
+static int powernow_k6_target(struct cpufreq_policy *policy,
+ unsigned int best_i)
+{
+
+ if (clock_ratio[best_i].driver_data > max_multiplier) {
+ pr_err("invalid target frequency\n");
+ return -EINVAL;
+ }
+
+ powernow_k6_set_cpu_multiplier(best_i);
+
+ return 0;
+}
+
+static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int i, f;
+ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ max_multiplier = 0;
+ khz = cpu_khz;
+ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
+ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
+ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
+ khz = usual_frequency_table[i].freq;
+ max_multiplier = usual_frequency_table[i].mult;
+ break;
+ }
+ }
+ if (param_max_multiplier) {
+ cpufreq_for_each_entry(pos, clock_ratio)
+ if (pos->driver_data == param_max_multiplier) {
+ max_multiplier = param_max_multiplier;
+ goto have_max_multiplier;
+ }
+ pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ return -EINVAL;
+ }
+
+ if (!max_multiplier) {
+ pr_warn("unknown frequency %u, cannot determine current multiplier\n",
+ khz);
+ pr_warn("use module parameters max_multiplier and bus_frequency\n");
+ return -EOPNOTSUPP;
+ }
+
+have_max_multiplier:
+ param_max_multiplier = max_multiplier;
+
+ if (param_busfreq) {
+ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
+ busfreq = param_busfreq / 10;
+ goto have_busfreq;
+ }
+ pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ return -EINVAL;
+ }
+
+ busfreq = khz / max_multiplier;
+have_busfreq:
+ param_busfreq = busfreq * 10;
+
+ /* table init */
+ cpufreq_for_each_entry(pos, clock_ratio) {
+ f = pos->driver_data;
+ if (f > max_multiplier)
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ pos->frequency = busfreq * f;
+ }
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 500000;
+ policy->freq_table = clock_ratio;
+
+ return 0;
+}
+
+
+static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == max_multiplier) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = clock_ratio[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ powernow_k6_target(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
+ }
+ return 0;
+}
+
+static unsigned int powernow_k6_get(unsigned int cpu)
+{
+ unsigned int ret;
+ ret = (busfreq * powernow_k6_get_cpu_multiplier());
+ return ret;
+}
+
+static struct cpufreq_driver powernow_k6_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_k6_target,
+ .init = powernow_k6_cpu_init,
+ .exit = powernow_k6_cpu_exit,
+ .get = powernow_k6_get,
+ .name = "powernow-k6",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id powernow_k6_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 12, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 13, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids);
+
+/**
+ * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
+ *
+ * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
+ * devices, -EIO if the PowerNow! I/O port region is unavailable, -EINVAL
+ * on problems during initialization, and zero on success.
+ */
+static int __init powernow_k6_init(void)
+{
+ if (!x86_match_cpu(powernow_k6_ids))
+ return -ENODEV;
+
+ if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
+ pr_info("PowerNow IOPORT region already used\n");
+ return -EIO;
+ }
+
+ if (cpufreq_register_driver(&powernow_k6_driver)) {
+ release_region(POWERNOW_IOPORT, 16);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
+ *
+ * Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
+ */
+static void __exit powernow_k6_exit(void)
+{
+ cpufreq_unregister_driver(&powernow_k6_driver);
+ release_region(POWERNOW_IOPORT, 16);
+}
+
+
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
+ "Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
+MODULE_LICENSE("GPL");
+
+module_init(powernow_k6_init);
+module_exit(powernow_k6_exit);
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
new file mode 100644
index 000000000..5d515fc34
--- /dev/null
+++ b/drivers/cpufreq/powernow-k7.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD K7 Powernow driver.
+ * (C) 2003 Dave Jones on behalf of SuSE Labs.
+ *
+ * Based upon datasheets & sample CPUs kindly provided by AMD.
+ *
+ * Errata 5:
+ * CPU may fail to execute a FID/VID change in presence of interrupt.
+ * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
+ * Errata 15:
+ * CPU with half frequency multipliers may hang upon wakeup from disconnect.
+ * - We disable half multipliers if ACPI is used on A0 stepping CPUs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dmi.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
+#include "powernow-k7.h"
+
+struct psb_s {
+ u8 signature[10];
+ u8 tableversion;
+ u8 flags;
+ u16 settlingtime;
+ u8 reserved1;
+ u8 numpst;
+};
+
+struct pst_s {
+ u32 cpuid;
+ u8 fsbspeed;
+ u8 maxfid;
+ u8 startvid;
+ u8 numpstates;
+};
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+union powernow_acpi_control_t {
+ struct {
+ unsigned long fid:5,
+ vid:5,
+ sgtc:20,
+ res1:2;
+ } bits;
+ unsigned long val;
+};
+#endif
+
+/* divide by 1000 to get VCore voltage in V. */
+static const int mobile_vid_table[32] = {
+ 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
+ 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
+ 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
+ 1075, 1050, 1025, 1000, 975, 950, 925, 0,
+};
+
+/* Frequency multiplier times 10, indexed by FID code; divide by 10 to get the ratio. */
+static const int fid_codes[32] = {
+ 110, 115, 120, 125, 50, 55, 60, 65,
+ 70, 75, 80, 85, 90, 95, 100, 105,
+ 30, 190, 40, 200, 130, 135, 140, 210,
+ 150, 225, 160, 165, 170, 180, -1, -1,
+};
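+
+/*
+ * Example: FID code 4 maps to 50, i.e. a 5.0x multiplier; with an fsb of
+ * 100000 kHz, get_ranges() below computes (fsb * 50) / 10 = 500000 kHz
+ * for that entry.
+ */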
+
+/* This parameter is used to force ACPI instead of the legacy method for
+ * configuration purposes.
+ */
+
+static int acpi_force;
+
+static struct cpufreq_frequency_table *powernow_table;
+
+static unsigned int can_scale_bus;
+static unsigned int can_scale_vid;
+static unsigned int minimum_speed = -1;
+static unsigned int maximum_speed;
+static unsigned int number_scales;
+static unsigned int fsb;
+static unsigned int latency;
+static char have_a0;
+
+static int check_fsb(unsigned int fsbspeed)
+{
+ int delta;
+ unsigned int f = fsb / 1000;
+
+ delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
+ return delta < 5;
+}
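+
+/*
+ * Example: a PST fsbspeed of 100 (MHz) matches a measured fsb of 99500 kHz,
+ * since 99500 / 1000 = 99 and the delta of 1 MHz is below the 5 MHz
+ * tolerance used above (illustrative numbers only).
+ */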
+
+static const struct x86_cpu_id powernow_k7_cpuids[] = {
+ X86_MATCH_VENDOR_FAM(AMD, 6, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids);
+
+static int check_powernow(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ unsigned int maxei, eax, ebx, ecx, edx;
+
+ if (!x86_match_cpu(powernow_k7_cpuids))
+ return 0;
+
+ /* Get maximum capabilities */
+ maxei = cpuid_eax(0x80000000);
+ if (maxei < 0x80000007) { /* Any powernow info ? */
+#ifdef MODULE
+ pr_info("No powernow capabilities detected\n");
+#endif
+ return 0;
+ }
+
+ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
+ pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
+ have_a0 = 1;
+ }
+
+ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+ /* Check we can actually do something before we say anything.*/
+ if (!(edx & (1 << 1 | 1 << 2)))
+ return 0;
+
+ pr_info("PowerNOW! Technology present. Can scale: ");
+
+ if (edx & 1 << 1) {
+ pr_cont("frequency");
+ can_scale_bus = 1;
+ }
+
+ if ((edx & (1 << 1 | 1 << 2)) == 0x6)
+ pr_cont(" and ");
+
+ if (edx & 1 << 2) {
+ pr_cont("voltage");
+ can_scale_vid = 1;
+ }
+
+ pr_cont("\n");
+ return 1;
+}
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+static void invalidate_entry(unsigned int entry)
+{
+ powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+}
+#endif
+
+static int get_ranges(unsigned char *pst)
+{
+ unsigned int j;
+ unsigned int speed;
+ u8 fid, vid;
+
+ powernow_table = kzalloc((sizeof(*powernow_table) *
+ (number_scales + 1)), GFP_KERNEL);
+ if (!powernow_table)
+ return -ENOMEM;
+
+ for (j = 0 ; j < number_scales; j++) {
+ fid = *pst++;
+
+ powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
+ powernow_table[j].driver_data = fid; /* lower 8 bits */
+
+ speed = powernow_table[j].frequency;
+
+ if ((fid_codes[fid] % 10) == 5) {
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ if (have_a0 == 1)
+ invalidate_entry(j);
+#endif
+ }
+
+ if (speed < minimum_speed)
+ minimum_speed = speed;
+ if (speed > maximum_speed)
+ maximum_speed = speed;
+
+ vid = *pst++;
+ powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */
+
+ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed/1000, vid,
+ mobile_vid_table[vid]/1000,
+ mobile_vid_table[vid]%1000);
+ }
+ powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
+ powernow_table[number_scales].driver_data = 0;
+
+ return 0;
+}
+
+
+static void change_FID(int fid)
+{
+ union msr_fidvidctl fidvidctl;
+
+ rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ if (fidvidctl.bits.FID != fid) {
+ fidvidctl.bits.SGTC = latency;
+ fidvidctl.bits.FID = fid;
+ fidvidctl.bits.VIDC = 0;
+ fidvidctl.bits.FIDC = 1;
+ wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ }
+}
+
+
+static void change_VID(int vid)
+{
+ union msr_fidvidctl fidvidctl;
+
+ rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ if (fidvidctl.bits.VID != vid) {
+ fidvidctl.bits.SGTC = latency;
+ fidvidctl.bits.VID = vid;
+ fidvidctl.bits.FIDC = 0;
+ fidvidctl.bits.VIDC = 1;
+ wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ }
+}
+
+
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ u8 fid, vid;
+ struct cpufreq_freqs freqs;
+ union msr_fidvidstatus fidvidstatus;
+ int cfid;
+
+ /* fid are the lower 8 bits of the index we stored into
+ * the cpufreq frequency table in powernow_decode_bios,
+ * vid are the upper 8 bits.
+ */
+
+ fid = powernow_table[index].driver_data & 0xFF;
+ vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
+
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ cfid = fidvidstatus.bits.CFID;
+ freqs.old = fsb * fid_codes[cfid] / 10;
+
+ freqs.new = powernow_table[index].frequency;
+
+ /* Now do the magic poking into the MSRs. */
+
+ if (have_a0 == 1) /* A0 errata 5 */
+ local_irq_disable();
+
+ if (freqs.old > freqs.new) {
+ /* Going down, so change FID first */
+ change_FID(fid);
+ change_VID(vid);
+ } else {
+ /* Going up, so change VID first */
+ change_VID(vid);
+ change_FID(fid);
+ }
+
+
+ if (have_a0 == 1)
+ local_irq_enable();
+
+ return 0;
+}
+
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+
+static struct acpi_processor_performance *acpi_processor_perf;
+
+static int powernow_acpi_init(void)
+{
+ int i;
+ int retval = 0;
+ union powernow_acpi_control_t pc;
+
+ if (acpi_processor_perf != NULL && powernow_table != NULL) {
+ retval = -EINVAL;
+ goto err0;
+ }
+
+ acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
+ if (!acpi_processor_perf) {
+ retval = -ENOMEM;
+ goto err0;
+ }
+
+ if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+ GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto err05;
+ }
+
+ if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
+ retval = -EIO;
+ goto err1;
+ }
+
+ if (acpi_processor_perf->control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ if (acpi_processor_perf->status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ number_scales = acpi_processor_perf->state_count;
+
+ if (number_scales < 2) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ powernow_table = kzalloc((sizeof(*powernow_table) *
+ (number_scales + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ pc.val = (unsigned long) acpi_processor_perf->states[0].control;
+ for (i = 0; i < number_scales; i++) {
+ u8 fid, vid;
+ struct acpi_processor_px *state =
+ &acpi_processor_perf->states[i];
+ unsigned int speed, speed_mhz;
+
+ pc.val = (unsigned long) state->control;
+ pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
+ i,
+ (u32) state->core_frequency,
+ (u32) state->power,
+ (u32) state->transition_latency,
+ (u32) state->control,
+ pc.bits.sgtc);
+
+ vid = pc.bits.vid;
+ fid = pc.bits.fid;
+
+ powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
+ powernow_table[i].driver_data = fid; /* lower 8 bits */
+ powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */
+
+ speed = powernow_table[i].frequency;
+ speed_mhz = speed / 1000;
+
+ /* processor_perflib will multiply the MHz value by 1000 to
+ * get a KHz value (e.g. 1266000). However, powernow-k7 works
+ * with true KHz values (e.g. 1266768). To ensure that all
+ * powernow frequencies are available, we must ensure that
+ * ACPI doesn't restrict them, so we round up the MHz value
+ * to ensure that perflib's computed KHz value is greater than
+ * or equal to powernow's KHz value.
+ */
+ if (speed % 1000 > 0)
+ speed_mhz++;
+
+ if ((fid_codes[fid] % 10) == 5) {
+ if (have_a0 == 1)
+ invalidate_entry(i);
+ }
+
+ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed_mhz, vid,
+ mobile_vid_table[vid]/1000,
+ mobile_vid_table[vid]%1000);
+
+ if (state->core_frequency != speed_mhz) {
+ state->core_frequency = speed_mhz;
+ pr_debug(" Corrected ACPI frequency to %d\n",
+ speed_mhz);
+ }
+
+ if (latency < pc.bits.sgtc)
+ latency = pc.bits.sgtc;
+
+ if (speed < minimum_speed)
+ minimum_speed = speed;
+ if (speed > maximum_speed)
+ maximum_speed = speed;
+ }
+
+ powernow_table[i].frequency = CPUFREQ_TABLE_END;
+ powernow_table[i].driver_data = 0;
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ return 0;
+
+err2:
+ acpi_processor_unregister_performance(0);
+err1:
+ free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
+ kfree(acpi_processor_perf);
+err0:
+ pr_warn("ACPI perflib can not be used on this platform\n");
+ acpi_processor_perf = NULL;
+ return retval;
+}
+#else
+static int powernow_acpi_init(void)
+{
+ pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
+ return -EINVAL;
+}
+#endif
+
+static void print_pst_entry(struct pst_s *pst, unsigned int j)
+{
+ pr_debug("PST:%d (@%p)\n", j, pst);
+ pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
+ pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
+}
+
+static int powernow_decode_bios(int maxfid, int startvid)
+{
+ struct psb_s *psb;
+ struct pst_s *pst;
+ unsigned int i, j;
+ unsigned char *p;
+ unsigned int etuple;
+ unsigned int ret;
+
+ etuple = cpuid_eax(0x80000001);
+
+ for (i = 0xC0000; i < 0xffff0 ; i += 16) {
+
+ p = phys_to_virt(i);
+
+ if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
+ pr_debug("Found PSB header at %p\n", p);
+ psb = (struct psb_s *) p;
+ pr_debug("Table version: 0x%x\n", psb->tableversion);
+ if (psb->tableversion != 0x12) {
+ pr_info("Sorry, only v1.2 tables supported right now\n");
+ return -ENODEV;
+ }
+
+ pr_debug("Flags: 0x%x\n", psb->flags);
+ if ((psb->flags & 1) == 0)
+ pr_debug("Mobile voltage regulator\n");
+ else
+ pr_debug("Desktop voltage regulator\n");
+
+ latency = psb->settlingtime;
+ if (latency < 100) {
+ pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
+ latency);
+ latency = 100;
+ }
+ pr_debug("Settling Time: %d microseconds.\n",
+ psb->settlingtime);
+ pr_debug("Has %d PST tables. (Only dumping ones "
+ "relevant to this CPU).\n",
+ psb->numpst);
+
+ p += sizeof(*psb);
+
+ pst = (struct pst_s *) p;
+
+ for (j = 0; j < psb->numpst; j++) {
+ pst = (struct pst_s *) p;
+ number_scales = pst->numpstates;
+
+ if ((etuple == pst->cpuid) &&
+ check_fsb(pst->fsbspeed) &&
+ (maxfid == pst->maxfid) &&
+ (startvid == pst->startvid)) {
+ print_pst_entry(pst, j);
+ p = (char *)pst + sizeof(*pst);
+ ret = get_ranges(p);
+ return ret;
+ } else {
+ unsigned int k;
+ p = (char *)pst + sizeof(*pst);
+ for (k = 0; k < number_scales; k++)
+ p += 2;
+ }
+ }
+ pr_info("No PST tables match this cpuid (0x%x)\n",
+ etuple);
+ pr_info("This is indicative of a broken BIOS\n");
+
+ return -EINVAL;
+ }
+ p++;
+ }
+
+ return -ENODEV;
+}
+
+
+/*
+ * We use the fact that the bus frequency is somehow a multiple of
+ * 100000/3 kHz, and compute sgtc according to this multiple.
+ * That way we match more closely how AMD intends this to work, and we
+ * get the same kind of behaviour already tested under the "well-known"
+ * other OS.
+ */
+static int fixup_sgtc(void)
+{
+ unsigned int sgtc;
+ unsigned int m;
+
+ m = fsb / 3333;
+ if ((m % 10) >= 5)
+ m += 5;
+
+ m /= 10;
+
+ sgtc = 100 * m * latency;
+ sgtc = sgtc / 3;
+ if (sgtc > 0xfffff) {
+ pr_warn("SGTC too large %d\n", sgtc);
+ sgtc = 0xfffff;
+ }
+ return sgtc;
+}
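+
+/*
+ * Example: for fsb = 100000 kHz, m = 100000 / 3333 = 30, which becomes 3
+ * after the divide by 10, so sgtc ends up as 100 * 3 * latency / 3,
+ * i.e. 100 * latency.
+ */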
+
+static unsigned int powernow_get(unsigned int cpu)
+{
+ union msr_fidvidstatus fidvidstatus;
+ unsigned int cfid;
+
+ if (cpu)
+ return 0;
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ cfid = fidvidstatus.bits.CFID;
+
+ return fsb * fid_codes[cfid] / 10;
+}
+
+
+static int acer_cpufreq_pst(const struct dmi_system_id *d)
+{
+ pr_warn("%s laptop with broken PST tables in BIOS detected\n",
+ d->ident);
+ pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
+ pr_warn("cpufreq scaling has been disabled as a result of this\n");
+ return 0;
+}
+
+/*
+ * Some Athlon laptops have hopelessly broken PST tables.
+ * A BIOS update is all that can save them.
+ * Mention this, and disable cpufreq.
+ */
+static const struct dmi_system_id powernow_dmi_table[] = {
+ {
+ .callback = acer_cpufreq_pst,
+ .ident = "Acer Aspire",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
+ },
+ },
+ { }
+};
+
+static int powernow_cpu_init(struct cpufreq_policy *policy)
+{
+ union msr_fidvidstatus fidvidstatus;
+ int result;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+
+ recalibrate_cpu_khz();
+
+ fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
+ if (!fsb) {
+ pr_warn("can not determine bus frequency\n");
+ return -EINVAL;
+ }
+ pr_debug("FSB: %3dMHz\n", fsb/1000);
+
+ if (dmi_check_system(powernow_dmi_table) || acpi_force) {
+ pr_info("PSB/PST known to be broken - trying ACPI instead\n");
+ result = powernow_acpi_init();
+ } else {
+ result = powernow_decode_bios(fidvidstatus.bits.MFID,
+ fidvidstatus.bits.SVID);
+ if (result) {
+ pr_info("Trying ACPI perflib\n");
+ maximum_speed = 0;
+ minimum_speed = -1;
+ latency = 0;
+ result = powernow_acpi_init();
+ if (result) {
+ pr_info("ACPI and legacy methods failed\n");
+ }
+ } else {
+ /* SGTC use the bus clock as timer */
+ latency = fixup_sgtc();
+ pr_info("SGTC: %d\n", latency);
+ }
+ }
+
+ if (result)
+ return result;
+
+ pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
+ minimum_speed/1000, maximum_speed/1000);
+
+ policy->cpuinfo.transition_latency =
+ cpufreq_scale(2000000UL, fsb, latency);
+ policy->freq_table = powernow_table;
+
+ return 0;
+}
+
+static int powernow_cpu_exit(struct cpufreq_policy *policy)
+{
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ if (acpi_processor_perf) {
+ acpi_processor_unregister_performance(0);
+ free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+ kfree(acpi_processor_perf);
+ }
+#endif
+
+ kfree(powernow_table);
+ return 0;
+}
+
+static struct cpufreq_driver powernow_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_target,
+ .get = powernow_get,
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ .bios_limit = acpi_processor_get_bios_limit,
+#endif
+ .init = powernow_cpu_init,
+ .exit = powernow_cpu_exit,
+ .name = "powernow-k7",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init powernow_init(void)
+{
+ if (check_powernow() == 0)
+ return -ENODEV;
+ return cpufreq_register_driver(&powernow_driver);
+}
+
+
+static void __exit powernow_exit(void)
+{
+ cpufreq_unregister_driver(&powernow_driver);
+}
+
+module_param(acpi_force, int, 0444);
+MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
+MODULE_LICENSE("GPL");
+
+late_initcall(powernow_init);
+module_exit(powernow_exit);
+
diff --git a/drivers/cpufreq/powernow-k7.h b/drivers/cpufreq/powernow-k7.h
new file mode 100644
index 000000000..4bc673fae
--- /dev/null
+++ b/drivers/cpufreq/powernow-k7.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (C) 2003 Dave Jones.
+ *
+ * AMD-specific information
+ */
+
+union msr_fidvidctl {
+ struct {
+ unsigned FID:5, // 4:0
+ reserved1:3, // 7:5
+ VID:5, // 12:8
+ reserved2:3, // 15:13
+ FIDC:1, // 16
+ VIDC:1, // 17
+ reserved3:2, // 19:18
+ FIDCHGRATIO:1, // 20
+		reserved4:11,	// 31:21
+		SGTC:20,	// 51:32
+ reserved5:12; // 63:52
+ } bits;
+ unsigned long long val;
+};
+
+union msr_fidvidstatus {
+ struct {
+ unsigned CFID:5, // 4:0
+ reserved1:3, // 7:5
+ SFID:5, // 12:8
+ reserved2:3, // 15:13
+ MFID:5, // 20:16
+ reserved3:11, // 31:21
+ CVID:5, // 36:32
+ reserved4:3, // 39:37
+ SVID:5, // 44:40
+ reserved5:3, // 47:45
+ MVID:5, // 52:48
+ reserved6:11; // 63:53
+ } bits;
+ unsigned long long val;
+};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
new file mode 100644
index 000000000..b10f7a1b7
--- /dev/null
+++ b/drivers/cpufreq/powernow-k8.c
@@ -0,0 +1,1221 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (c) 2003-2012 Advanced Micro Devices, Inc.
+ *
+ * Maintainer:
+ * Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *
+ * Based on the powernow-k7.c module written by Dave Jones.
+ * (C) 2003 Dave Jones on behalf of SuSE Labs
+ * (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * (C) 2004 Pavel Machek <pavel@ucw.cz>
+ * Based upon datasheets & sample CPUs kindly provided by AMD.
+ *
+ * Valuable input gratefully received from Dave Jones, Pavel Machek,
+ * Dominik Brodowski, Jacob Shin, and others.
+ * Originally developed by Paul Devriendt.
+ *
+ * Processor information obtained from Chapter 9 (Power and Thermal
+ * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ * Power Management" in BKDGs for newer AMD CPU families.
+ *
+ * Tables for specific CPUs can be inferred from AMD's processor
+ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#include <linux/acpi.h>
+#include <linux/mutex.h>
+#include <acpi/processor.h>
+
+#define VERSION "version 2.20.00"
+#include "powernow-k8.h"
+
+/* serialize freq changes */
+static DEFINE_MUTEX(fidvid_mutex);
+
+static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
+/* Return a frequency in MHz, given an input fid */
+static u32 find_freq_from_fid(u32 fid)
+{
+ return 800 + (fid * 100);
+}
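+
+/* e.g. fid 0 -> 800 MHz, fid 2 -> 1000 MHz, fid 16 -> 2400 MHz */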
+
+/* Return a frequency in KHz, given an input fid */
+static u32 find_khz_freq_from_fid(u32 fid)
+{
+ return 1000 * find_freq_from_fid(fid);
+}
+
+/* Return the vco fid for an input fid
+ *
+ * Each "low" fid has a corresponding "high" fid, and you can get to "low"
+ * fids only from the corresponding high fids. This returns the "high" fid
+ * corresponding to a "low" one.
+ */
+static u32 convert_fid_to_vco_fid(u32 fid)
+{
+ if (fid < HI_FID_TABLE_BOTTOM)
+ return 8 + (2 * fid);
+ else
+ return fid;
+}
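+
+/*
+ * For instance, a fid f below HI_FID_TABLE_BOTTOM maps to VCO fid 8 + 2 * f
+ * (f = 2 would give 12), while fids at or above HI_FID_TABLE_BOTTOM map to
+ * themselves.
+ */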
+
+/*
+ * Return 1 if the pending bit is set. Unless we just instructed the processor
+ * to transition to a new state, seeing this bit set is really bad news.
+ */
+static int pending_bit_stuck(void)
+{
+ u32 lo, hi __always_unused;
+
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
+}
+
+/*
+ * Update the global current fid / vid values from the status msr.
+ * Returns 1 on error.
+ */
+static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
+{
+ u32 lo, hi;
+ u32 i = 0;
+
+ do {
+ if (i++ > 10000) {
+ pr_debug("detected change pending stuck\n");
+ return 1;
+ }
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ } while (lo & MSR_S_LO_CHANGE_PENDING);
+
+ data->currvid = hi & MSR_S_HI_CURRENT_VID;
+ data->currfid = lo & MSR_S_LO_CURRENT_FID;
+
+ return 0;
+}
+
+/* the isochronous relief time */
+static void count_off_irt(struct powernow_k8_data *data)
+{
+ udelay((1 << data->irt) * 10);
+}
+
+/* the voltage stabilization time */
+static void count_off_vst(struct powernow_k8_data *data)
+{
+ udelay(data->vstable * VST_UNITS_20US);
+}
+
+/* need to init the control msr to a safe value (for each cpu) */
+static void fidvid_msr_init(void)
+{
+ u32 lo, hi;
+ u8 fid, vid;
+
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ vid = hi & MSR_S_HI_CURRENT_VID;
+ fid = lo & MSR_S_LO_CURRENT_FID;
+ lo = fid | (vid << MSR_C_LO_VID_SHIFT);
+ hi = MSR_C_HI_STP_GNT_BENIGN;
+ pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
+ wrmsr(MSR_FIDVID_CTL, lo, hi);
+}
+
+/* write the new fid value along with the other control fields to the msr */
+static int write_new_fid(struct powernow_k8_data *data, u32 fid)
+{
+ u32 lo;
+ u32 savevid = data->currvid;
+ u32 i = 0;
+
+ if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
+ pr_err("internal error - overflow on fid write\n");
+ return 1;
+ }
+
+ lo = fid;
+ lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
+ lo |= MSR_C_LO_INIT_FID_VID;
+
+ pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
+ fid, lo, data->plllock * PLL_LOCK_CONVERSION);
+
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
+ if (i++ > 100) {
+ pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
+
+ count_off_irt(data);
+
+ if (savevid != data->currvid) {
+ pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
+ savevid, data->currvid);
+ return 1;
+ }
+
+ if (fid != data->currfid) {
+ pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
+ data->currfid);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write a new vid to the hardware */
+static int write_new_vid(struct powernow_k8_data *data, u32 vid)
+{
+ u32 lo;
+ u32 savefid = data->currfid;
+ int i = 0;
+
+ if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
+ pr_err("internal error - overflow on vid write\n");
+ return 1;
+ }
+
+ lo = data->currfid;
+ lo |= (vid << MSR_C_LO_VID_SHIFT);
+ lo |= MSR_C_LO_INIT_FID_VID;
+
+ pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
+ vid, lo, STOP_GRANT_5NS);
+
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
+ if (i++ > 100) {
+ pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
+
+ if (savefid != data->currfid) {
+ pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
+ savefid, data->currfid);
+ return 1;
+ }
+
+ if (vid != data->currvid) {
+ pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
+ vid, data->currvid);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Reduce the vid toward reqvid, stepping down by at most 'step' vid codes.
+ * Decreasing vid codes represent increasing voltages:
+ * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
+ */
+static int decrease_vid_code_by_step(struct powernow_k8_data *data,
+ u32 reqvid, u32 step)
+{
+ if ((data->currvid - reqvid) > step)
+ reqvid = data->currvid - step;
+
+ if (write_new_vid(data, reqvid))
+ return 1;
+
+ count_off_vst(data);
+
+ return 0;
+}
+
+/* Change Opteron/Athlon64 fid and vid in three phases: ramp the core
+ * voltage (phase 1), step the frequency/fid (phase 2), then settle the
+ * final vid (phase 3).
+ */
+static int transition_fid_vid(struct powernow_k8_data *data,
+ u32 reqfid, u32 reqvid)
+{
+ if (core_voltage_pre_transition(data, reqvid, reqfid))
+ return 1;
+
+ if (core_frequency_transition(data, reqfid))
+ return 1;
+
+ if (core_voltage_post_transition(data, reqvid))
+ return 1;
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
+ pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
+ smp_processor_id(),
+ reqfid, reqvid, data->currfid, data->currvid);
+ return 1;
+ }
+
+ pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 1 - core voltage transition ... setup voltage */
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+ u32 reqvid, u32 reqfid)
+{
+ u32 rvosteps = data->rvo;
+ u32 savefid = data->currfid;
+ u32 maxvid, lo __always_unused, rvomult = 1;
+
+ pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqvid, data->rvo);
+
+ if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
+ rvomult = 2;
+ rvosteps *= rvomult;
+ rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
+ maxvid = 0x1f & (maxvid >> 16);
+ pr_debug("ph1 maxvid=0x%x\n", maxvid);
+ if (reqvid < maxvid) /* lower numbers are higher voltages */
+ reqvid = maxvid;
+
+ while (data->currvid > reqvid) {
+ pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
+ data->currvid, reqvid);
+ if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
+ return 1;
+ }
+
+ while ((rvosteps > 0) &&
+ ((rvomult * data->rvo + data->currvid) > reqvid)) {
+ if (data->currvid == maxvid) {
+ rvosteps = 0;
+ } else {
+ pr_debug("ph1: changing vid for rvo, req 0x%x\n",
+ data->currvid - 1);
+ if (decrease_vid_code_by_step(data, data->currvid-1, 1))
+ return 1;
+ rvosteps--;
+ }
+ }
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (savefid != data->currfid) {
+ pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
+ return 1;
+ }
+
+ pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 2 - core frequency transition */
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
+{
+ u32 vcoreqfid, vcocurrfid, vcofiddiff;
+ u32 fid_interval, savevid = data->currvid;
+
+ if (data->currfid == reqfid) {
+ pr_err("ph2 null fid transition 0x%x\n", data->currfid);
+ return 0;
+ }
+
+ pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqfid);
+
+ vcoreqfid = convert_fid_to_vco_fid(reqfid);
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
+ vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
+ : vcoreqfid - vcocurrfid;
+
+ if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
+ vcofiddiff = 0;
+
+ while (vcofiddiff > 2) {
+ (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
+
+ if (reqfid > data->currfid) {
+ if (data->currfid > LO_FID_TABLE_TOP) {
+ if (write_new_fid(data,
+ data->currfid + fid_interval))
+ return 1;
+ } else {
+ if (write_new_fid
+ (data,
+ 2 + convert_fid_to_vco_fid(data->currfid)))
+ return 1;
+ }
+ } else {
+ if (write_new_fid(data, data->currfid - fid_interval))
+ return 1;
+ }
+
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
+ vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
+ : vcoreqfid - vcocurrfid;
+ }
+
+ if (write_new_fid(data, reqfid))
+ return 1;
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (data->currfid != reqfid) {
+ pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
+ data->currfid, reqfid);
+ return 1;
+ }
+
+ if (savevid != data->currvid) {
+ pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
+ savevid, data->currvid);
+ return 1;
+ }
+
+ pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 3 - core voltage transition flow ... jump to the final vid. */
+static int core_voltage_post_transition(struct powernow_k8_data *data,
+ u32 reqvid)
+{
+ u32 savefid = data->currfid;
+ u32 savereqvid = reqvid;
+
+ pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid);
+
+ if (reqvid != data->currvid) {
+ if (write_new_vid(data, reqvid))
+ return 1;
+
+ if (savefid != data->currfid) {
+ pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n",
+ savefid, data->currfid);
+ return 1;
+ }
+
+ if (data->currvid != reqvid) {
+			pr_err("ph3: failed vid transition, req 0x%x, curr 0x%x\n",
+ reqvid, data->currvid);
+ return 1;
+ }
+ }
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (savereqvid != data->currvid) {
+ pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
+ return 1;
+ }
+
+ if (savefid != data->currfid) {
+ pr_debug("ph3 failed, currfid changed 0x%x\n",
+ data->currfid);
+ return 1;
+ }
+
+ pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+static const struct x86_cpu_id powernow_k8_ids[] = {
+ /* IO based frequency switching */
+ X86_MATCH_VENDOR_FAM(AMD, 0xf, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
+
+static void check_supported_cpu(void *_rc)
+{
+ u32 eax, ebx, ecx, edx;
+ int *rc = _rc;
+
+ *rc = -ENODEV;
+
+ eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+
+ if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
+ if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
+ ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
+ pr_info("Processor cpuid %x not supported\n", eax);
+ return;
+ }
+
+ eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
+ if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
+ pr_info("No frequency change capabilities detected\n");
+ return;
+ }
+
+ cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
+ if ((edx & P_STATE_TRANSITION_CAPABLE)
+ != P_STATE_TRANSITION_CAPABLE) {
+ pr_info("Power state transitions not supported\n");
+ return;
+ }
+ *rc = 0;
+ }
+}
+
+static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
+ u8 maxvid)
+{
+ unsigned int j;
+ u8 lastfid = 0xff;
+
+ for (j = 0; j < data->numps; j++) {
+ if (pst[j].vid > LEAST_VID) {
+ pr_err(FW_BUG "vid %d invalid : 0x%x\n", j,
+ pst[j].vid);
+ return -EINVAL;
+ }
+ if (pst[j].vid < data->rvo) {
+ /* vid + rvo >= 0 */
+ pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (pst[j].vid < maxvid + data->rvo) {
+ /* vid + rvo >= maxvid */
+ pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (pst[j].fid > MAX_FID) {
+ pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
+ /* Only first fid is allowed to be in "low" range */
+ pr_err(FW_BUG "two low fids - %d : 0x%x\n", j,
+ pst[j].fid);
+ return -EINVAL;
+ }
+ if (pst[j].fid < lastfid)
+ lastfid = pst[j].fid;
+ }
+ if (lastfid & 1) {
+ pr_err(FW_BUG "lastfid invalid\n");
+ return -EINVAL;
+ }
+ if (lastfid > LO_FID_TABLE_TOP)
+ pr_info(FW_BUG "first fid not from lo freq table\n");
+
+ return 0;
+}
+
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+ unsigned int entry)
+{
+ powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+}
+
+static void print_basics(struct powernow_k8_data *data)
+{
+ int j;
+ for (j = 0; j < data->numps; j++) {
+ if (data->powernow_table[j].frequency !=
+ CPUFREQ_ENTRY_INVALID) {
+ pr_info("fid 0x%x (%d MHz), vid 0x%x\n",
+ data->powernow_table[j].driver_data & 0xff,
+ data->powernow_table[j].frequency/1000,
+ data->powernow_table[j].driver_data >> 8);
+ }
+ }
+ if (data->batps)
+ pr_info("Only %d pstates on battery\n", data->batps);
+}
+
+static int fill_powernow_table(struct powernow_k8_data *data,
+ struct pst_s *pst, u8 maxvid)
+{
+ struct cpufreq_frequency_table *powernow_table;
+ unsigned int j;
+
+ if (data->batps) {
+ /* use ACPI support to get full speed on mains power */
+		pr_warn("Only %d pstates usable (use ACPI driver for full range)\n",
+ data->batps);
+ data->numps = data->batps;
+ }
+
+ for (j = 1; j < data->numps; j++) {
+ if (pst[j-1].fid >= pst[j].fid) {
+ pr_err("PST out of sequence\n");
+ return -EINVAL;
+ }
+ }
+
+ if (data->numps < 2) {
+ pr_err("no p states to transition\n");
+ return -ENODEV;
+ }
+
+ if (check_pst_table(data, pst, maxvid))
+ return -EINVAL;
+
+ powernow_table = kzalloc((sizeof(*powernow_table)
+ * (data->numps + 1)), GFP_KERNEL);
+ if (!powernow_table)
+ return -ENOMEM;
+
+ for (j = 0; j < data->numps; j++) {
+ int freq;
+ powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
+ powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
+ freq = find_khz_freq_from_fid(pst[j].fid);
+ powernow_table[j].frequency = freq;
+ }
+ powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
+ powernow_table[data->numps].driver_data = 0;
+
+ if (query_current_values_with_pending_wait(data)) {
+ kfree(powernow_table);
+ return -EIO;
+ }
+
+ pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
+ data->powernow_table = powernow_table;
+ if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
+ print_basics(data);
+
+ for (j = 0; j < data->numps; j++)
+ if ((pst[j].fid == data->currfid) &&
+ (pst[j].vid == data->currvid))
+ return 0;
+
+ pr_debug("currfid/vid do not match PST, ignoring\n");
+ return 0;
+}
+
+/* Find and validate the PSB/PST table in BIOS. */
+static int find_psb_table(struct powernow_k8_data *data)
+{
+ struct psb_s *psb;
+ unsigned int i;
+ u32 mvs;
+ u8 maxvid;
+ u32 cpst = 0;
+ u32 thiscpuid;
+
+ for (i = 0xc0000; i < 0xffff0; i += 0x10) {
+ /* Scan BIOS looking for the signature. */
+		/* It cannot be at 0xffff0 - it is too big. */
+
+ psb = phys_to_virt(i);
+ if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
+ continue;
+
+ pr_debug("found PSB header at 0x%p\n", psb);
+
+ pr_debug("table vers: 0x%x\n", psb->tableversion);
+ if (psb->tableversion != PSB_VERSION_1_4) {
+ pr_err(FW_BUG "PSB table is not v1.4\n");
+ return -ENODEV;
+ }
+
+ pr_debug("flags: 0x%x\n", psb->flags1);
+ if (psb->flags1) {
+ pr_err(FW_BUG "unknown flags\n");
+ return -ENODEV;
+ }
+
+ data->vstable = psb->vstable;
+ pr_debug("voltage stabilization time: %d(*20us)\n",
+ data->vstable);
+
+ pr_debug("flags2: 0x%x\n", psb->flags2);
+ data->rvo = psb->flags2 & 3;
+ data->irt = ((psb->flags2) >> 2) & 3;
+ mvs = ((psb->flags2) >> 4) & 3;
+ data->vidmvs = 1 << mvs;
+ data->batps = ((psb->flags2) >> 6) & 3;
+
+ pr_debug("ramp voltage offset: %d\n", data->rvo);
+ pr_debug("isochronous relief time: %d\n", data->irt);
+ pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
+
+ pr_debug("numpst: 0x%x\n", psb->num_tables);
+ cpst = psb->num_tables;
+ if ((psb->cpuid == 0x00000fc0) ||
+ (psb->cpuid == 0x00000fe0)) {
+ thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+ if ((thiscpuid == 0x00000fc0) ||
+ (thiscpuid == 0x00000fe0))
+ cpst = 1;
+ }
+ if (cpst != 1) {
+ pr_err(FW_BUG "numpst must be 1\n");
+ return -ENODEV;
+ }
+
+ data->plllock = psb->plllocktime;
+ pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
+ pr_debug("maxfid: 0x%x\n", psb->maxfid);
+ pr_debug("maxvid: 0x%x\n", psb->maxvid);
+ maxvid = psb->maxvid;
+
+ data->numps = psb->numps;
+ pr_debug("numpstates: 0x%x\n", data->numps);
+ return fill_powernow_table(data,
+ (struct pst_s *)(psb+1), maxvid);
+ }
+ /*
+	 * If you see this message, complain to the BIOS manufacturer. If
+	 * they tell you "we do not support Linux" or some similar
+ * nonsense, remember that Windows 2000 uses the same legacy
+ * mechanism that the old Linux PSB driver uses. Tell them it
+ * is broken with Windows 2000.
+ *
+ * The reference to the AMD documentation is chapter 9 in the
+ * BIOS and Kernel Developer's Guide, which is available on
+ * www.amd.com
+ */
+ pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
+ pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
+ return -ENODEV;
+}
+
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
+ unsigned int index)
+{
+ u64 control;
+
+ if (!data->acpi_data.state_count)
+ return;
+
+ control = data->acpi_data.states[index].control;
+ data->irt = (control >> IRT_SHIFT) & IRT_MASK;
+ data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
+ data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+ data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
+ data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
+ data->vstable = (control >> VST_SHIFT) & VST_MASK;
+}
+
+static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+{
+ struct cpufreq_frequency_table *powernow_table;
+ int ret_val = -ENODEV;
+ u64 control, status;
+
+ if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+ pr_debug("register performance failed: bad ACPI data\n");
+ return -EIO;
+ }
+
+ /* verify the data contained in the ACPI structures */
+ if (data->acpi_data.state_count <= 1) {
+ pr_debug("No ACPI P-States\n");
+ goto err_out;
+ }
+
+ control = data->acpi_data.control_register.space_id;
+ status = data->acpi_data.status_register.space_id;
+
+ if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ pr_debug("Invalid control/status registers (%llx - %llx)\n",
+ control, status);
+ goto err_out;
+ }
+
+ /* fill in data->powernow_table */
+ powernow_table = kzalloc((sizeof(*powernow_table)
+ * (data->acpi_data.state_count + 1)), GFP_KERNEL);
+ if (!powernow_table)
+ goto err_out;
+
+ /* fill in data */
+ data->numps = data->acpi_data.state_count;
+ powernow_k8_acpi_pst_values(data, 0);
+
+ ret_val = fill_powernow_table_fidvid(data, powernow_table);
+ if (ret_val)
+ goto err_out_mem;
+
+ powernow_table[data->acpi_data.state_count].frequency =
+ CPUFREQ_TABLE_END;
+ data->powernow_table = powernow_table;
+
+ if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
+ print_basics(data);
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+ pr_err("unable to alloc powernow_k8_data cpumask\n");
+ ret_val = -ENOMEM;
+ goto err_out_mem;
+ }
+
+ return 0;
+
+err_out_mem:
+ kfree(powernow_table);
+
+err_out:
+ acpi_processor_unregister_performance(data->cpu);
+
+ /* data->acpi_data.state_count informs us at ->exit()
+ * whether ACPI was used */
+ data->acpi_data.state_count = 0;
+
+ return ret_val;
+}
+
+static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
+ struct cpufreq_frequency_table *powernow_table)
+{
+ int i;
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ u32 fid;
+ u32 vid;
+ u32 freq, index;
+ u64 status, control;
+
+ if (data->exttype) {
+ status = data->acpi_data.states[i].status;
+ fid = status & EXT_FID_MASK;
+ vid = (status >> VID_SHIFT) & EXT_VID_MASK;
+ } else {
+ control = data->acpi_data.states[i].control;
+ fid = control & FID_MASK;
+ vid = (control >> VID_SHIFT) & VID_MASK;
+ }
+
+ pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
+
+ index = fid | (vid<<8);
+ powernow_table[i].driver_data = index;
+
+ freq = find_khz_freq_from_fid(fid);
+ powernow_table[i].frequency = freq;
+
+ /* verify frequency is OK */
+ if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
+ pr_debug("invalid freq %u kHz, ignoring\n", freq);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
+		/* verify voltage is OK -
+		 * BIOSes use "off" to indicate an invalid vid */
+ if (vid == VID_OFF) {
+ pr_debug("invalid vid %u, ignoring\n", vid);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
+ if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
+ pr_info("invalid freq entries %u kHz vs. %u kHz\n",
+ freq, (unsigned int)
+ (data->acpi_data.states[i].core_frequency
+ * 1000));
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+ }
+ return 0;
+}
+
+static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+{
+ if (data->acpi_data.state_count)
+ acpi_processor_unregister_performance(data->cpu);
+ free_cpumask_var(data->acpi_data.shared_cpu_map);
+}
+
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+ int max_latency = 0;
+ int i;
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ int cur_latency = data->acpi_data.states[i].transition_latency
+ + data->acpi_data.states[i].bus_master_latency;
+ if (cur_latency > max_latency)
+ max_latency = cur_latency;
+ }
+ if (max_latency == 0) {
+ pr_err(FW_WARN "Invalid zero transition latency\n");
+ max_latency = 1;
+ }
+ /* value in usecs, needs to be in nanoseconds */
+ return 1000 * max_latency;
+}
+
+/* Take a frequency, and issue the fid/vid transition command */
+static int transition_frequency_fidvid(struct powernow_k8_data *data,
+ unsigned int index,
+ struct cpufreq_policy *policy)
+{
+ u32 fid = 0;
+ u32 vid = 0;
+ int res;
+ struct cpufreq_freqs freqs;
+
+ pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
+
+ /* fid/vid correctness check for k8 */
+ /* fid are the lower 8 bits of the index we stored into
+ * the cpufreq frequency table in find_psb_table, vid
+ * are the upper 8 bits.
+ */
+ fid = data->powernow_table[index].driver_data & 0xFF;
+ vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;
+
+ pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if ((data->currvid == vid) && (data->currfid == fid)) {
+ pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
+ fid, vid);
+ return 0;
+ }
+
+ pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), fid, vid);
+ freqs.old = find_khz_freq_from_fid(data->currfid);
+ freqs.new = find_khz_freq_from_fid(fid);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ res = transition_fid_vid(data, fid, vid);
+ cpufreq_freq_transition_end(policy, &freqs, res);
+
+ return res;
+}
+
+struct powernowk8_target_arg {
+ struct cpufreq_policy *pol;
+ unsigned newstate;
+};
+
+static long powernowk8_target_fn(void *arg)
+{
+ struct powernowk8_target_arg *pta = arg;
+ struct cpufreq_policy *pol = pta->pol;
+ unsigned newstate = pta->newstate;
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ u32 checkfid;
+ u32 checkvid;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ checkfid = data->currfid;
+ checkvid = data->currvid;
+
+ if (pending_bit_stuck()) {
+ pr_err("failing targ, change pending bit set\n");
+ return -EIO;
+ }
+
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+ pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+ pol->max);
+
+ if (query_current_values_with_pending_wait(data))
+ return -EIO;
+
+ pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+ if ((checkvid != data->currvid) ||
+ (checkfid != data->currfid)) {
+		pr_info("error - out of sync, fid 0x%x 0x%x, vid 0x%x 0x%x\n",
+ checkfid, data->currfid,
+ checkvid, data->currvid);
+ }
+
+ mutex_lock(&fidvid_mutex);
+
+ powernow_k8_acpi_pst_values(data, newstate);
+
+ ret = transition_frequency_fidvid(data, newstate, pol);
+
+ if (ret) {
+ pr_err("transition frequency failed\n");
+ mutex_unlock(&fidvid_mutex);
+ return 1;
+ }
+ mutex_unlock(&fidvid_mutex);
+
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+
+ return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
+{
+ struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
+
+ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+}
+
+struct init_on_cpu {
+ struct powernow_k8_data *data;
+ int rc;
+};
+
+static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+ struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+ if (pending_bit_stuck()) {
+ pr_err("failing init, change pending bit set\n");
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ fidvid_msr_init();
+
+ init_on_cpu->rc = 0;
+}
+
+#define MISSING_PSS_MSG \
+ FW_BUG "No compatible ACPI _PSS objects found.\n" \
+ FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \
+ FW_BUG "If that doesn't help, try upgrading your BIOS.\n"
+
+/* per CPU init entry point to the driver */
+static int powernowk8_cpu_init(struct cpufreq_policy *pol)
+{
+ struct powernow_k8_data *data;
+ struct init_on_cpu init_on_cpu;
+ int rc, cpu;
+
+ smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+ if (rc)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->cpu = pol->cpu;
+
+ if (powernow_k8_cpu_init_acpi(data)) {
+ /*
+ * Use the PSB BIOS structure. This is only available on
+ * an UP version, and is deprecated by AMD.
+ */
+ if (num_online_cpus() != 1) {
+ pr_err_once(MISSING_PSS_MSG);
+ goto err_out;
+ }
+ if (pol->cpu != 0) {
+ pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
+ goto err_out;
+ }
+ rc = find_psb_table(data);
+ if (rc)
+ goto err_out;
+
+		/* Take a crude guess here.
+		 * The guess is in microseconds, so multiply by 1000 to get ns */
+ pol->cpuinfo.transition_latency = (
+ ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+ ((1 << data->irt) * 30)) * 1000;
+ } else /* ACPI _PSS objects available */
+ pol->cpuinfo.transition_latency = get_transition_latency(data);
+
+ /* only run on specific CPU from here on */
+ init_on_cpu.data = data;
+ smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+ &init_on_cpu, 1);
+ rc = init_on_cpu.rc;
+ if (rc != 0)
+ goto err_out_exit_acpi;
+
+ cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
+ data->available_cores = pol->cpus;
+ pol->freq_table = data->powernow_table;
+
+ pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
+
+ return 0;
+
+err_out_exit_acpi:
+ powernow_k8_cpu_exit_acpi(data);
+
+err_out:
+ kfree(data);
+ return -ENODEV;
+}
+
+static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+{
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
+
+ if (!data)
+ return -EINVAL;
+
+ powernow_k8_cpu_exit_acpi(data);
+
+ kfree(data->powernow_table);
+ kfree(data);
+ /* pol->cpus will be empty here, use related_cpus instead. */
+ for_each_cpu(cpu, pol->related_cpus)
+ per_cpu(powernow_data, cpu) = NULL;
+
+ return 0;
+}
+
+static void query_values_on_cpu(void *_err)
+{
+ int *err = _err;
+ struct powernow_k8_data *data = __this_cpu_read(powernow_data);
+
+ *err = query_current_values_with_pending_wait(data);
+}
+
+static unsigned int powernowk8_get(unsigned int cpu)
+{
+ struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
+ unsigned int khz = 0;
+ int err;
+
+ if (!data)
+ return 0;
+
+ smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+ if (err)
+ goto out;
+
+ khz = find_khz_freq_from_fid(data->currfid);
+
+out:
+ return khz;
+}
+
+static struct cpufreq_driver cpufreq_amd64_driver = {
+ .flags = CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernowk8_target,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = powernowk8_cpu_init,
+ .exit = powernowk8_cpu_exit,
+ .get = powernowk8_get,
+ .name = "powernow-k8",
+ .attr = cpufreq_generic_attr,
+};
+
+static void __request_acpi_cpufreq(void)
+{
+ const char drv[] = "acpi-cpufreq";
+ const char *cur_drv;
+
+ cur_drv = cpufreq_get_current_driver();
+ if (!cur_drv)
+ goto request;
+
+ if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
+ pr_warn("WTF driver: %s\n", cur_drv);
+
+ return;
+
+ request:
+ pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
+ request_module(drv);
+}
+
+/* driver entry point for init */
+static int powernowk8_init(void)
+{
+ unsigned int i, supported_cpus = 0;
+ int ret;
+
+ if (!x86_match_cpu(powernow_k8_ids))
+ return -ENODEV;
+
+ if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ __request_acpi_cpufreq();
+ return -ENODEV;
+ }
+
+ cpus_read_lock();
+ for_each_online_cpu(i) {
+ smp_call_function_single(i, check_supported_cpu, &ret, 1);
+ if (!ret)
+ supported_cpus++;
+ }
+
+ if (supported_cpus != num_online_cpus()) {
+ cpus_read_unlock();
+ return -ENODEV;
+ }
+ cpus_read_unlock();
+
+ ret = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (ret)
+ return ret;
+
+ pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+ return ret;
+}
+
+/* driver entry point for term */
+static void __exit powernowk8_exit(void)
+{
+ pr_debug("exit\n");
+
+ cpufreq_unregister_driver(&cpufreq_amd64_driver);
+}
+
+MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
+MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
+MODULE_LICENSE("GPL");
+
+late_initcall(powernowk8_init);
+module_exit(powernowk8_exit);
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
new file mode 100644
index 000000000..83331ceb6
--- /dev/null
+++ b/drivers/cpufreq/powernow-k8.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) 2003-2006 Advanced Micro Devices, Inc.
+ */
+
+struct powernow_k8_data {
+ unsigned int cpu;
+
+ u32 numps; /* number of p-states */
+ u32 batps; /* number of p-states supported on battery */
+
+ /* these values are constant when the PSB is used to determine
+ * vid/fid pairings, but are modified during the ->target() call
+ * when ACPI is used */
+ u32 rvo; /* ramp voltage offset */
+ u32 irt; /* isochronous relief time */
+ u32 vidmvs; /* usable value calculated from mvs */
+ u32 vstable; /* voltage stabilization time, units 20 us */
+ u32 plllock; /* pll lock time, units 1 us */
+ u32 exttype; /* extended interface = 1 */
+
+ /* keep track of the current fid / vid or pstate */
+ u32 currvid;
+ u32 currfid;
+
+ /* the powernow_table includes all frequency and vid/fid pairings:
+ * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+ * frequency is in kHz */
+ struct cpufreq_frequency_table *powernow_table;
+
+ /* the acpi table needs to be kept. it's only available if ACPI was
+ * used to determine valid frequency/vid/fid states */
+ struct acpi_processor_performance acpi_data;
+
+ /* we need to keep track of associated cores, but let cpufreq
+ * handle hotplug events - so just point at cpufreq pol->cpus
+ * structure */
+ struct cpumask *available_cores;
+};
+
+/* processor's cpuid instruction support */
+#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
+#define CPUID_XFAM 0x0ff00000 /* extended family */
+#define CPUID_XFAM_K8 0
+#define CPUID_XMOD 0x000f0000 /* extended model */
+#define CPUID_XMOD_REV_MASK 0x000c0000
+#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
+#define CPUID_USE_XFAM_XMOD 0x00000f00
+#define CPUID_GET_MAX_CAPABILITIES 0x80000000
+#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
+#define P_STATE_TRANSITION_CAPABLE 6
+
+/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
+/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
+/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
+/* the register number is placed in ecx, and the data is returned in edx:eax. */
+
+#define MSR_FIDVID_CTL 0xc0010041
+#define MSR_FIDVID_STATUS 0xc0010042
+
+/* Field definitions within the FID VID Low Control MSR : */
+#define MSR_C_LO_INIT_FID_VID 0x00010000
+#define MSR_C_LO_NEW_VID 0x00003f00
+#define MSR_C_LO_NEW_FID 0x0000003f
+#define MSR_C_LO_VID_SHIFT 8
+
+/* Field definitions within the FID VID High Control MSR : */
+#define MSR_C_HI_STP_GNT_TO 0x000fffff
+
+/* Field definitions within the FID VID Low Status MSR : */
+#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
+#define MSR_S_LO_MAX_RAMP_VID 0x3f000000
+#define MSR_S_LO_MAX_FID 0x003f0000
+#define MSR_S_LO_START_FID 0x00003f00
+#define MSR_S_LO_CURRENT_FID 0x0000003f
+
+/* Field definitions within the FID VID High Status MSR : */
+#define MSR_S_HI_MIN_WORKING_VID 0x3f000000
+#define MSR_S_HI_MAX_WORKING_VID 0x003f0000
+#define MSR_S_HI_START_VID 0x00003f00
+#define MSR_S_HI_CURRENT_VID 0x0000003f
+#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
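+
+/*
+ * Illustrative sketch (not part of the driver): with the masks above, the
+ * current fid/vid could be read back roughly like this, assuming the x86
+ * rdmsr() helper:
+ *
+ *	u32 lo, hi;
+ *	rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ *	currfid = lo & MSR_S_LO_CURRENT_FID;
+ *	currvid = hi & MSR_S_HI_CURRENT_VID;
+ *
+ * The driver's real accessors (query_current_values_with_pending_wait() and
+ * fidvid_msr_init()) live in powernow-k8.c.
+ */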
+
+/*
+ * There are restrictions that the frequencies have to follow:
+ * - only 1 entry in the low fid table ( <=1.4GHz )
+ * - lowest entry in the high fid table must be >= 2 * the entry in the
+ * low fid table
+ * - lowest entry in the high fid table must be <= 200 MHz + 2 * the entry
+ * in the low fid table
+ * - the parts can only step at <= 200 MHz intervals, odd fid values are
+ * supported in revision G and later revisions.
+ * - lowest frequency must be >= interprocessor hypertransport link speed
+ * (only applies to MP systems obviously)
+ */
+
+/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
+#define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */
+#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
+
+#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
+#define HI_VCOFREQ_TABLE_BOTTOM 1600
+
+#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
+
+#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
+#define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */
+
+#define MIN_FREQ 800 /* Min and max freqs, per spec */
+#define MAX_FREQ 5000
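+
+/*
+ * Putting the constants above together (informal, for orientation only):
+ * find_khz_freq_from_fid() maps each fid unit to 100 MHz on top of a
+ * 800 MHz base, so fid 0x00 is MIN_FREQ (800 MHz) and MAX_FID 0x2a (42)
+ * is 800 + 42 * 100 = 5000 MHz, i.e. MAX_FREQ; fids normally step by 2,
+ * matching MIN_FREQ_RESOLUTION.
+ */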
+
+#define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */
+#define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */
+
+#define VID_OFF 0x3f
+
+#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
+
+#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
+
+#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
+#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
+
+/*
+ * Most values of interest are encoded in a single field of the _PSS
+ * entries: the "control" value.
+ */
+
+#define IRT_SHIFT 30
+#define RVO_SHIFT 28
+#define EXT_TYPE_SHIFT 27
+#define PLL_L_SHIFT 20
+#define MVS_SHIFT 18
+#define VST_SHIFT 11
+#define VID_SHIFT 6
+#define IRT_MASK 3
+#define RVO_MASK 3
+#define EXT_TYPE_MASK 1
+#define PLL_L_MASK 0x7f
+#define MVS_MASK 3
+#define VST_MASK 0x7f
+#define VID_MASK 0x1f
+#define FID_MASK 0x1f
+#define EXT_VID_MASK 0x3f
+#define EXT_FID_MASK 0x3f
+
+
+/*
+ * Version 1.4 of the PSB table. This table is constructed by BIOS and is
+ * to tell the OS's power management driver which VIDs and FIDs are
+ * supported by this particular processor.
+ * If the data in the PSB / PST is wrong, then this driver will program the
+ * wrong values into hardware, which is very likely to lead to a crash.
+ */
+
+#define PSB_ID_STRING "AMDK7PNOW!"
+#define PSB_ID_STRING_LEN 10
+
+#define PSB_VERSION_1_4 0x14
+
+struct psb_s {
+ u8 signature[10];
+ u8 tableversion;
+ u8 flags1;
+ u16 vstable;
+ u8 flags2;
+ u8 num_tables;
+ u32 cpuid;
+ u8 plllocktime;
+ u8 maxfid;
+ u8 maxvid;
+ u8 numps;
+};
+
+/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
+struct pst_s {
+ u8 fid;
+ u8 vid;
+};
+
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+		u32 reqvid, u32 reqfid);
+static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
+
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
+
+static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
new file mode 100644
index 000000000..fddbd1ea1
--- /dev/null
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * POWERNV cpufreq driver for the IBM POWER processors
+ *
+ * (C) Copyright IBM 2014
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ */
+
+#define pr_fmt(fmt) "powernv-cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/hashtable.h>
+#include <trace/events/power.h>
+
+#include <asm/cputhreads.h>
+#include <asm/firmware.h>
+#include <asm/reg.h>
+#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>
+#include <linux/timer.h>
+
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
+#define PMSR_PSAFE_ENABLE (1UL << 30)
+#define PMSR_SPR_EM_DISABLE (1UL << 31)
+#define MAX_PSTATE_SHIFT 32
+#define LPSTATE_SHIFT 48
+#define GPSTATE_SHIFT 56
+#define MAX_NR_CHIPS 32
+
+#define MAX_RAMP_DOWN_TIME 5120
+/*
+ * On an idle system we want the global pstate to ramp-down from max value to
+ * min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
+ * then ramp-down rapidly later on.
+ *
+ * This gives a percentage rampdown for time elapsed in milliseconds.
+ * ramp_down_percentage = ((ms * ms) >> 18)
+ * ~= 3.8 * (sec * sec)
+ *
+ * At 0 ms ramp_down_percent = 0
+ * At 5120 ms ramp_down_percent = 100
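+ * At 2560 ms ramp_down_percent = (2560 * 2560) >> 18 = 25, i.e. only about a
+ * quarter of the ramp-down after half the time, since the curve is quadratic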
+ */
+#define ramp_down_percent(time) ((time * time) >> 18)
+
+/* Interval after which the timer is queued to bring down global pstate */
+#define GPSTATE_TIMER_INTERVAL 2000
+
+/**
+ * struct global_pstate_info - Per policy data structure to maintain history of
+ * global pstates
+ * @highest_lpstate_idx: The local pstate index from which we are
+ * ramping down
+ * @elapsed_time: Time in ms spent in ramping down from
+ * highest_lpstate_idx
+ * @last_sampled_time: Time from boot in ms when global pstates were
+ * last set
+ * @last_lpstate_idx:		Last set value of the local pstate, as a
+ *				cpufreq table index
+ * @last_gpstate_idx:		Last set value of the global pstate, as a
+ *				cpufreq table index
+ * @timer: Is used for ramping down if cpu goes idle for
+ * a long time with global pstate held high
+ * @gpstate_lock: A spinlock to maintain synchronization between
+ * routines called by the timer handler and
+ *				governor's target_index calls
+ * @policy: Associated CPUFreq policy
+ */
+struct global_pstate_info {
+ int highest_lpstate_idx;
+ unsigned int elapsed_time;
+ unsigned int last_sampled_time;
+ int last_lpstate_idx;
+ int last_gpstate_idx;
+ spinlock_t gpstate_lock;
+ struct timer_list timer;
+ struct cpufreq_policy *policy;
+};
+
+static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
+ * indexed by a function of pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ * cpufreq_frequency_table for frequency
+ * corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ * hashtable
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
+static bool rebooting, throttled, occ_reset;
+
+static const char * const throttle_reason[] = {
+ "No throttling",
+ "Power Cap",
+ "Processor Over Temperature",
+ "Power Supply Failure",
+ "Over Current",
+ "OCC Reset"
+};
+
+enum throttle_reason_type {
+ NO_THROTTLE = 0,
+ POWERCAP,
+ CPU_OVERTEMP,
+ POWER_SUPPLY_FAILURE,
+ OVERCURRENT,
+ OCC_RESET_THROTTLE,
+ OCC_MAX_REASON
+};
+
+static struct chip {
+ unsigned int id;
+ bool throttled;
+ bool restore;
+ u8 throttle_reason;
+ cpumask_t mask;
+ struct work_struct throttle;
+ int throttle_turbo;
+ int throttle_sub_turbo;
+ int reason[OCC_MAX_REASON];
+} *chips;
+
+static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
+
+/*
+ * Note:
+ * The set of pstates consists of contiguous integers.
+ * powernv_pstate_info stores the index of the frequency table for
+ * max, min and nominal frequencies. It also stores number of
+ * available frequencies.
+ *
+ * powernv_pstate_info.nominal indicates the index to the highest
+ * non-turbo frequency.
+ */
+static struct powernv_pstate_info {
+ unsigned int min;
+ unsigned int max;
+ unsigned int nominal;
+ unsigned int nr_pstates;
+ bool wof_enabled;
+} powernv_pstate_info;
+
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
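+
+/*
+ * Example (made-up PMSR value): if bits 63:56 hold 0x0a, bits 55:48 hold
+ * 0x0c and bits 39:32 hold 0x10, then extract_global_pstate() returns 0x0a,
+ * extract_local_pstate() returns 0x0c and extract_max_pstate() returns 0x10.
+ */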
+
+/* Use the following functions for conversions between pstate_id and index */
+
+/*
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ * frequency in the cpufreq frequency table
+ * powernv_freqs indexed by @i.
+ *
+ *	If @i is out of bounds, this will return the pstate
+ * corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
+{
+ if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+		pr_warn_once("idx_to_pstate: index %u is out of bounds\n", i);
+ return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+ }
+
+ return powernv_freqs[i].driver_data;
+}
+
+/*
+ * pstate_to_idx : Returns the index in the cpufreq frequency table
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
+{
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
+
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
+ }
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
+}
+
+static inline void reset_gpstates(struct cpufreq_policy *policy)
+{
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ gpstates->highest_lpstate_idx = 0;
+ gpstates->elapsed_time = 0;
+ gpstates->last_sampled_time = 0;
+ gpstates->last_lpstate_idx = 0;
+ gpstates->last_gpstate_idx = 0;
+}
+
+/*
+ * Initialize the freq table based on data obtained
+ * from the firmware passed via device-tree
+ */
+static int init_powernv_pstates(void)
+{
+ struct device_node *power_mgt;
+ int i, nr_pstates = 0;
+ const __be32 *pstate_ids, *pstate_freqs;
+ u32 len_ids, len_freqs;
+ u32 pstate_min, pstate_max, pstate_nominal;
+ u32 pstate_turbo, pstate_ultra_turbo;
+ int rc = -ENODEV;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("power-mgt node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
+ pr_warn("ibm,pstate-min node not found\n");
+ goto out;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
+ pr_warn("ibm,pstate-max node not found\n");
+ goto out;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
+ &pstate_nominal)) {
+ pr_warn("ibm,pstate-nominal not found\n");
+ goto out;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
+ &pstate_ultra_turbo)) {
+ powernv_pstate_info.wof_enabled = false;
+ goto next;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
+ &pstate_turbo)) {
+ powernv_pstate_info.wof_enabled = false;
+ goto next;
+ }
+
+ if (pstate_turbo == pstate_ultra_turbo)
+ powernv_pstate_info.wof_enabled = false;
+ else
+ powernv_pstate_info.wof_enabled = true;
+
+next:
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
+ pstate_nominal, pstate_max);
+ pr_info("Workload Optimized Frequency is %s in the platform\n",
+ (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+
+ pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
+ if (!pstate_ids) {
+ pr_warn("ibm,pstate-ids not found\n");
+ goto out;
+ }
+
+ pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
+ &len_freqs);
+ if (!pstate_freqs) {
+ pr_warn("ibm,pstate-frequencies-mhz not found\n");
+ goto out;
+ }
+
+ if (len_ids != len_freqs) {
+		pr_warn("Entries in ibm,pstate-ids and "
+			"ibm,pstate-frequencies-mhz do not match\n");
+ }
+
+ nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+ if (!nr_pstates) {
+ pr_warn("No PStates found\n");
+ goto out;
+ }
+
+ powernv_pstate_info.nr_pstates = nr_pstates;
+ pr_debug("NR PStates %d\n", nr_pstates);
+
+ for (i = 0; i < nr_pstates; i++) {
+ u32 id = be32_to_cpu(pstate_ids[i]);
+ u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
+
+ pr_debug("PState id %d freq %d MHz\n", id, freq);
+ powernv_freqs[i].frequency = freq * 1000; /* kHz */
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+ revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ if (!revmap_data) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ revmap_data->pstate_id = id & 0xFF;
+ revmap_data->cpufreq_table_idx = i;
+ key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
+ hash_add(pstate_revmap, &revmap_data->hentry, key);
+
+ if (id == pstate_max)
+ powernv_pstate_info.max = i;
+ if (id == pstate_nominal)
+ powernv_pstate_info.nominal = i;
+ if (id == pstate_min)
+ powernv_pstate_info.min = i;
+
+ if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
+ int j;
+
+ for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
+ powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
+ }
+ }
+
+ /* End of list marker entry */
+ powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+ of_node_put(power_mgt);
+ return 0;
+out:
+ of_node_put(power_mgt);
+ return rc;
+}
+
+/* Returns the CPU frequency corresponding to the pstate_id. */
+static unsigned int pstate_id_to_freq(u8 pstate_id)
+{
+ int i;
+
+ i = pstate_to_idx(pstate_id);
+ if (i >= powernv_pstate_info.nr_pstates || i < 0) {
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
+ pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
+ i = powernv_pstate_info.nominal;
+ }
+
+ return powernv_freqs[i].frequency;
+}
+
+/*
+ * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
+ * the firmware
+ */
+static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ powernv_freqs[powernv_pstate_info.nominal].frequency);
+}
+
+static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+ __ATTR_RO(cpuinfo_nominal_freq);
+
+#define SCALING_BOOST_FREQS_ATTR_INDEX 2
+
+static struct freq_attr *powernv_cpu_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_cpuinfo_nominal_freq,
+ &cpufreq_freq_attr_scaling_boost_freqs,
+ NULL,
+};
+
+#define throttle_attr(name, member) \
+static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct chip *chip = per_cpu(chip_info, policy->cpu); \
+ \
+ return sprintf(buf, "%u\n", chip->member); \
+} \
+ \
+static struct freq_attr throttle_attr_##name = __ATTR_RO(name) \
+
+throttle_attr(unthrottle, reason[NO_THROTTLE]);
+throttle_attr(powercap, reason[POWERCAP]);
+throttle_attr(overtemp, reason[CPU_OVERTEMP]);
+throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
+throttle_attr(overcurrent, reason[OVERCURRENT]);
+throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
+throttle_attr(turbo_stat, throttle_turbo);
+throttle_attr(sub_turbo_stat, throttle_sub_turbo);
+
+static struct attribute *throttle_attrs[] = {
+ &throttle_attr_unthrottle.attr,
+ &throttle_attr_powercap.attr,
+ &throttle_attr_overtemp.attr,
+ &throttle_attr_supply_fault.attr,
+ &throttle_attr_overcurrent.attr,
+ &throttle_attr_occ_reset.attr,
+ &throttle_attr_turbo_stat.attr,
+ &throttle_attr_sub_turbo_stat.attr,
+ NULL,
+};
+
+static const struct attribute_group throttle_attr_grp = {
+ .name = "throttle_stats",
+ .attrs = throttle_attrs,
+};
+
+/* Helper routines */
+
+/* Access helpers to power mgt SPR */
+
+static inline unsigned long get_pmspr(unsigned long sprn)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ return mfspr(SPRN_PMCR);
+
+ case SPRN_PMICR:
+ return mfspr(SPRN_PMICR);
+
+ case SPRN_PMSR:
+ return mfspr(SPRN_PMSR);
+ }
+ BUG();
+}
+
+static inline void set_pmspr(unsigned long sprn, unsigned long val)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ mtspr(SPRN_PMCR, val);
+ return;
+
+ case SPRN_PMICR:
+ mtspr(SPRN_PMICR, val);
+ return;
+ }
+ BUG();
+}
+
+/*
+ * Use objects of this type to query/update
+ * pstates on a remote CPU via smp_call_function.
+ */
+struct powernv_smp_call_data {
+ unsigned int freq;
+ u8 pstate_id;
+ u8 gpstate_id;
+};
+
+/*
+ * powernv_read_cpu_freq: Reads the current frequency on this CPU.
+ *
+ * Called via smp_call_function.
+ *
+ * Note: The caller of the smp_call_function should pass an argument of
+ * the type 'struct powernv_smp_call_data *' along with this function.
+ *
+ * The current frequency on this CPU will be returned via
+ * ((struct powernv_smp_call_data *)arg)->freq;
+ */
+static void powernv_read_cpu_freq(void *arg)
+{
+ unsigned long pmspr_val;
+ struct powernv_smp_call_data *freq_data = arg;
+
+ pmspr_val = get_pmspr(SPRN_PMSR);
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
+ freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
+}
+
+/*
+ * powernv_cpufreq_get: Returns the CPU frequency as reported by the
+ * firmware for CPU 'cpu'. This value is reported through the sysfs
+ * file cpuinfo_cur_freq.
+ */
+static unsigned int powernv_cpufreq_get(unsigned int cpu)
+{
+ struct powernv_smp_call_data freq_data;
+
+ smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+ &freq_data, 1);
+
+ return freq_data.freq;
+}
+
+/*
+ * set_pstate: Sets the pstate on this CPU.
+ *
+ * This is called via an smp_call_function.
+ *
+ * The caller must ensure that freq_data is of the type
+ * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
+ * on this CPU should be present in freq_data->pstate_id.
+ */
+static void set_pstate(void *data)
+{
+ unsigned long val;
+ struct powernv_smp_call_data *freq_data = data;
+ unsigned long pstate_ul = freq_data->pstate_id;
+ unsigned long gpstate_ul = freq_data->gpstate_id;
+
+ val = get_pmspr(SPRN_PMCR);
+ val = val & 0x0000FFFFFFFFFFFFULL;
+
+ pstate_ul = pstate_ul & 0xFF;
+ gpstate_ul = gpstate_ul & 0xFF;
+
+ /* Set both global(bits 56..63) and local(bits 48..55) PStates */
+ val = val | (gpstate_ul << 56) | (pstate_ul << 48);
+
+ pr_debug("Setting cpu %d pmcr to %016lX\n",
+ raw_smp_processor_id(), val);
+ set_pmspr(SPRN_PMCR, val);
+}
+
+/*
+ * get_nominal_index: Returns the index corresponding to the nominal
+ * pstate in the cpufreq table
+ */
+static inline unsigned int get_nominal_index(void)
+{
+ return powernv_pstate_info.nominal;
+}
+
+static void powernv_cpufreq_throttle_check(void *data)
+{
+ struct chip *chip;
+ unsigned int cpu = smp_processor_id();
+ unsigned long pmsr;
+ u8 pmsr_pmax;
+ unsigned int pmsr_pmax_idx;
+
+ pmsr = get_pmspr(SPRN_PMSR);
+ chip = this_cpu_read(chip_info);
+
+ /* Check for Pmax Capping */
+ pmsr_pmax = extract_max_pstate(pmsr);
+ pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
+ if (pmsr_pmax_idx != powernv_pstate_info.max) {
+ if (chip->throttled)
+ goto next;
+ chip->throttled = true;
+ if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
+ cpu, chip->id, pmsr_pmax,
+ idx_to_pstate(powernv_pstate_info.nominal));
+ chip->throttle_sub_turbo++;
+ } else {
+ chip->throttle_turbo++;
+ }
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
+ } else if (chip->throttled) {
+ chip->throttled = false;
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
+ }
+
+ /* Check if Psafe_mode_active is set in PMSR. */
+next:
+ if (pmsr & PMSR_PSAFE_ENABLE) {
+ throttled = true;
+ pr_info("Pstate set to safe frequency\n");
+ }
+
+ /* Check if SPR_EM_DISABLE is set in PMSR */
+ if (pmsr & PMSR_SPR_EM_DISABLE) {
+ throttled = true;
+ pr_info("Frequency Control disabled from OS\n");
+ }
+
+ if (throttled) {
+ pr_info("PMSR = %16lx\n", pmsr);
+ pr_warn("CPU Frequency could be throttled\n");
+ }
+}
+
+/**
+ * calc_global_pstate - Calculate global pstate
+ * @elapsed_time: Elapsed time in milliseconds
+ * @local_pstate_idx: New local pstate
+ * @highest_lpstate_idx:	pstate from which it is ramping down
+ *
+ * Finds the appropriate global pstate based on the pstate from which it is
+ * ramping down and the time elapsed while ramping down. It follows a quadratic
+ * equation which ensures that it ramps down to pmin in ~5 seconds.
+ */
+static inline int calc_global_pstate(unsigned int elapsed_time,
+ int highest_lpstate_idx,
+ int local_pstate_idx)
+{
+ int index_diff;
+
+ /*
+	 * Using ramp_down_percent we get the percentage of rampdown
+	 * that we expect to have dropped so far. The difference between
+	 * highest_lpstate_idx and powernv_pstate_info.min gives the absolute
+	 * number of pstates we will eventually have dropped by the end of
+	 * 5 seconds; scale it to get the number of pstates to drop now.
+ */
+ index_diff = ((int)ramp_down_percent(elapsed_time) *
+ (powernv_pstate_info.min - highest_lpstate_idx)) / 100;
+
+ /* Ensure that global pstate is >= to local pstate */
+ if (highest_lpstate_idx + index_diff >= local_pstate_idx)
+ return local_pstate_idx;
+ else
+ return highest_lpstate_idx + index_diff;
+}
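+
+/*
+ * Worked example with made-up numbers (not from any particular machine):
+ * if highest_lpstate_idx is 0 (the fastest entry), powernv_pstate_info.min
+ * is 20 and elapsed_time is 2560 ms, ramp_down_percent() yields 25, so
+ * index_diff is (25 * (20 - 0)) / 100 = 5: the global pstate may drift five
+ * table entries towards the minimum, but is capped at the local pstate
+ * index so that the global frequency never drops below the local one.
+ */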
+
+static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
+{
+ unsigned int timer_interval;
+
+ /*
+	 * Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms, but if
+	 * that would exceed MAX_RAMP_DOWN_TIME ms of total ramp-down time,
+	 * set it so that it fires exactly when MAX_RAMP_DOWN_TIME ms of
+	 * ramp-down time have elapsed.
+ */
+ if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
+ > MAX_RAMP_DOWN_TIME)
+ timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
+ else
+ timer_interval = GPSTATE_TIMER_INTERVAL;
+
+ mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
+}
+
+/**
+ * gpstate_timer_handler - Ramp down the global pstate towards the local one
+ *
+ * @t: Timer context used to fetch global pstate info struct
+ *
+ * This handler brings the global pstate closer to the local pstate
+ * according to the quadratic ramp-down equation. It queues a new timer if
+ * the global pstate is still not equal to the local pstate.
+ */
+static void gpstate_timer_handler(struct timer_list *t)
+{
+ struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
+ struct cpufreq_policy *policy = gpstates->policy;
+ int gpstate_idx, lpstate_idx;
+ unsigned long val;
+ unsigned int time_diff = jiffies_to_msecs(jiffies)
+ - gpstates->last_sampled_time;
+ struct powernv_smp_call_data freq_data;
+
+ if (!spin_trylock(&gpstates->gpstate_lock))
+ return;
+ /*
+	 * If the timer has migrated to a different cpu then bring
+ * it back to one of the policy->cpus
+ */
+ if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+ gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+ add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
+
+ /*
+	 * If PMCR was last updated using fast_switch then
+	 * gpstates->last_lpstate_idx may hold a stale value.
+	 * Hence, read from PMCR to get correct data.
+ */
+ val = get_pmspr(SPRN_PMCR);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
+ if (freq_data.gpstate_id == freq_data.pstate_id) {
+ reset_gpstates(policy);
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
+
+ gpstates->last_sampled_time += time_diff;
+ gpstates->elapsed_time += time_diff;
+
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+ gpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ lpstate_idx = gpstate_idx;
+ reset_gpstates(policy);
+ gpstates->highest_lpstate_idx = gpstate_idx;
+ } else {
+ lpstate_idx = pstate_to_idx(freq_data.pstate_id);
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ lpstate_idx);
+ }
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = lpstate_idx;
+ /*
+	 * If the local pstate is equal to the global pstate, rampdown is
+	 * over, so the timer does not need to be queued.
+ */
+ if (gpstate_idx != gpstates->last_lpstate_idx)
+ queue_gpstate_timer(gpstates);
+
+ set_pstate(&freq_data);
+ spin_unlock(&gpstates->gpstate_lock);
+}
+
+/*
+ * powernv_cpufreq_target_index: Sets the frequency corresponding to
+ * the cpufreq table entry indexed by new_index on the cpus in the
+ * mask policy->cpus
+ */
+static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int new_index)
+{
+ struct powernv_smp_call_data freq_data;
+ unsigned int cur_msec, gpstate_idx;
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ if (unlikely(rebooting) && new_index != get_nominal_index())
+ return 0;
+
+ if (!throttled) {
+ /* we don't want to be preempted while
+ * checking if the CPU frequency has been throttled
+ */
+ preempt_disable();
+ powernv_cpufreq_throttle_check(NULL);
+ preempt_enable();
+ }
+
+ cur_msec = jiffies_to_msecs(get_jiffies_64());
+
+ freq_data.pstate_id = idx_to_pstate(new_index);
+ if (!gpstates) {
+ freq_data.gpstate_id = freq_data.pstate_id;
+ goto no_gpstate;
+ }
+
+ spin_lock(&gpstates->gpstate_lock);
+
+ if (!gpstates->last_sampled_time) {
+ gpstate_idx = new_index;
+ gpstates->highest_lpstate_idx = new_index;
+ goto gpstates_done;
+ }
+
+ if (gpstates->last_gpstate_idx < new_index) {
+ gpstates->elapsed_time += cur_msec -
+ gpstates->last_sampled_time;
+
+ /*
+		 * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
+		 * reset all global pstate related data and set it equal to the
+		 * local pstate to start fresh.
+ */
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
+ } else {
+			/* Elapsed time is less than 5 seconds, continue to ramp down */
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ new_index);
+ }
+ } else {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
+ }
+
+ /*
+	 * If the local pstate is equal to the global pstate, rampdown is
+	 * over, so the timer does not need to be queued.
+ */
+ if (gpstate_idx != new_index)
+ queue_gpstate_timer(gpstates);
+ else
+ del_timer_sync(&gpstates->timer);
+
+gpstates_done:
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_sampled_time = cur_msec;
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = new_index;
+
+ spin_unlock(&gpstates->gpstate_lock);
+
+no_gpstate:
+ /*
+	 * Use smp_call_function to send an IPI and execute the
+	 * mtspr on the target CPU. We could do that without an IPI
+	 * if the current CPU is within policy->cpus (same core).
+ */
+ smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+ return 0;
+}
+
+static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int base, i;
+ struct kernfs_node *kn;
+ struct global_pstate_info *gpstates;
+
+ base = cpu_first_thread_sibling(policy->cpu);
+
+ for (i = 0; i < threads_per_core; i++)
+ cpumask_set_cpu(base + i, policy->cpus);
+
+ kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
+ if (!kn) {
+ int ret;
+
+ ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
+ if (ret) {
+ pr_info("Failed to create throttle stats directory for cpu %d\n",
+ policy->cpu);
+ return ret;
+ }
+ } else {
+ kernfs_put(kn);
+ }
+
+ policy->freq_table = powernv_freqs;
+ policy->fast_switch_possible = true;
+
+ if (pvr_version_is(PVR_POWER9))
+ return 0;
+
+ /* Initialise Gpstate ramp-down timer only on POWER8 */
+ gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
+ if (!gpstates)
+ return -ENOMEM;
+
+ policy->driver_data = gpstates;
+
+ /* initialize timer */
+ gpstates->policy = policy;
+ timer_setup(&gpstates->timer, gpstate_timer_handler,
+ TIMER_PINNED | TIMER_DEFERRABLE);
+ gpstates->timer.expires = jiffies +
+ msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
+ spin_lock_init(&gpstates->gpstate_lock);
+
+ return 0;
+}
+
+static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct powernv_smp_call_data freq_data;
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+ freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
+ smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+ if (gpstates)
+ del_timer_sync(&gpstates->timer);
+
+ kfree(policy->driver_data);
+
+ return 0;
+}
+
+static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ int cpu;
+ struct cpufreq_policy *cpu_policy;
+
+ rebooting = true;
+ for_each_online_cpu(cpu) {
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ continue;
+ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+ cpufreq_cpu_put(cpu_policy);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block powernv_cpufreq_reboot_nb = {
+ .notifier_call = powernv_cpufreq_reboot_notifier,
+};
+
+static void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+ struct chip *chip = container_of(work, struct chip, throttle);
+ struct cpufreq_policy *policy;
+ unsigned int cpu;
+ cpumask_t mask;
+
+ cpus_read_lock();
+ cpumask_and(&mask, &chip->mask, cpu_online_mask);
+ smp_call_function_any(&mask,
+ powernv_cpufreq_throttle_check, NULL, 0);
+
+ if (!chip->restore)
+ goto out;
+
+ chip->restore = false;
+ for_each_cpu(cpu, &mask) {
+ int index;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ index = cpufreq_table_find_index_c(policy, policy->cur, false);
+ powernv_cpufreq_target_index(policy, index);
+ cpumask_andnot(&mask, &mask, policy->cpus);
+ cpufreq_cpu_put(policy);
+ }
+out:
+ cpus_read_unlock();
+}
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+ unsigned long msg_type, void *_msg)
+{
+ struct opal_msg *msg = _msg;
+ struct opal_occ_msg omsg;
+ int i;
+
+ if (msg_type != OPAL_MSG_OCC)
+ return 0;
+
+ omsg.type = be64_to_cpu(msg->params[0]);
+
+ switch (omsg.type) {
+ case OCC_RESET:
+ occ_reset = true;
+ pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+ /*
+ * powernv_cpufreq_throttle_check() is called in
+ * target() callback which can detect the throttle state
+ * for governors like ondemand.
+ * But static governors will not call target() often thus
+ * report throttling here.
+ */
+ if (!throttled) {
+ throttled = true;
+ pr_warn("CPU frequency is throttled for duration\n");
+ }
+
+ break;
+ case OCC_LOAD:
+ pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+ break;
+ case OCC_THROTTLE:
+ omsg.chip = be64_to_cpu(msg->params[1]);
+ omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+ if (occ_reset) {
+ occ_reset = false;
+ throttled = false;
+ pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].restore = true;
+ schedule_work(&chips[i].throttle);
+ }
+
+ return 0;
+ }
+
+ for (i = 0; i < nr_chips; i++)
+ if (chips[i].id == omsg.chip)
+ break;
+
+ if (omsg.throttle_status >= 0 &&
+ omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
+ chips[i].throttle_reason = omsg.throttle_status;
+ chips[i].reason[omsg.throttle_status]++;
+ }
+
+ if (!omsg.throttle_status)
+ chips[i].restore = true;
+
+ schedule_work(&chips[i].throttle);
+ }
+ return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+ .notifier_call = powernv_cpufreq_occ_msg,
+ .next = NULL,
+ .priority = 0,
+};
+
+static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ int index;
+ struct powernv_smp_call_data freq_data;
+
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
+ freq_data.pstate_id = powernv_freqs[index].driver_data;
+ freq_data.gpstate_id = powernv_freqs[index].driver_data;
+ set_pstate(&freq_data);
+
+ return powernv_freqs[index].frequency;
+}
+
+static struct cpufreq_driver powernv_cpufreq_driver = {
+ .name = "powernv-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = powernv_cpufreq_cpu_init,
+ .exit = powernv_cpufreq_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernv_cpufreq_target_index,
+ .fast_switch = powernv_fast_switch,
+ .get = powernv_cpufreq_get,
+ .attr = powernv_cpu_freq_attr,
+};
+
+static int init_chip_info(void)
+{
+ unsigned int *chip;
+ unsigned int cpu, i;
+ unsigned int prev_chip_id = UINT_MAX;
+ cpumask_t *chip_cpu_mask;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* Allocate a chip cpu mask large enough to fit mask for all chips */
+ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
+ if (!chip_cpu_mask) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ unsigned int id = cpu_to_chip_id(cpu);
+
+ if (prev_chip_id != id) {
+ prev_chip_id = id;
+ chip[nr_chips++] = id;
+ }
+ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
+ }
+
+ chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ if (!chips) {
+ ret = -ENOMEM;
+ goto out_free_chip_cpu_mask;
+ }
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].id = chip[i];
+ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
+ INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+ for_each_cpu(cpu, &chips[i].mask)
+ per_cpu(chip_info, cpu) = &chips[i];
+ }
+
+out_free_chip_cpu_mask:
+ kfree(chip_cpu_mask);
+free_and_return:
+ kfree(chip);
+ return ret;
+}
+
+static inline void clean_chip_info(void)
+{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
+ kfree(chips);
+}
+
+static inline void unregister_all_notifiers(void)
+{
+ opal_message_notifier_unregister(OPAL_MSG_OCC,
+ &powernv_cpufreq_opal_nb);
+ unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+}
+
+static int __init powernv_cpufreq_init(void)
+{
+ int rc = 0;
+
+ /* Don't probe on pseries (guest) platforms */
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ /* Discover pstates from device tree and init */
+ rc = init_powernv_pstates();
+ if (rc)
+ goto out;
+
+ /* Populate chip info */
+ rc = init_chip_info();
+ if (rc)
+ goto out;
+
+ if (powernv_pstate_info.wof_enabled)
+ powernv_cpufreq_driver.boost_enabled = true;
+ else
+ powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+
+ rc = cpufreq_register_driver(&powernv_cpufreq_driver);
+ if (rc) {
+ pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+ goto cleanup;
+ }
+
+ if (powernv_pstate_info.wof_enabled)
+ cpufreq_enable_boost_support();
+
+ register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+
+ return 0;
+cleanup:
+ clean_chip_info();
+out:
+ pr_info("Platform driver disabled. System does not support PState control\n");
+ return rc;
+}
+module_init(powernv_cpufreq_init);
+
+static void __exit powernv_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&powernv_cpufreq_driver);
+ unregister_all_notifiers();
+ clean_chip_info();
+}
+module_exit(powernv_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
new file mode 100644
index 000000000..e3313ce63
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 5, 0},
+ {0, 6, 0},
+ {0, 8, 0},
+ {0, 10, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
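+
+/*
+ * Illustrative example (assuming a device tree clock-frequency of 3.2 GHz):
+ * cbe_cpufreq_cpu_init() divides max_freq = 3200000 kHz by the driver_data
+ * values above, giving 3200000, 1600000, 1066666, 800000, 640000, 533333,
+ * 400000 and 320000 kHz for the eight pmodes.
+ */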
+
+/*
+ * hardware specific functions
+ */
+
+static int set_pmode(unsigned int cpu, unsigned int slow_mode)
+{
+ int rc;
+
+ if (cbe_cpufreq_has_pmi)
+ rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
+ else
+ rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
+
+ pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
+
+ return rc;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ const u32 *max_freqp;
+ u32 max_freq;
+ int cur_pmode;
+ struct device_node *cpu;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ return -ENODEV;
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ /*
+ * Let's check we can actually get to the CELL regs
+ */
+ if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
+ !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
+ pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
+ return -EINVAL;
+ }
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+
+ of_node_put(cpu);
+
+ if (!max_freqp)
+ return -EINVAL;
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ cpufreq_for_each_entry(pos, cbe_freqs) {
+ pos->frequency = max_freq / pos->driver_data;
+ pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
+ }
+
+ /* if DEBUG is enabled set_pmode() measures the latency
+ * of a transition */
+ policy->cpuinfo.transition_latency = 25000;
+
+ cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
+ pr_debug("current pmode is at %d\n",cur_pmode);
+
+ policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+ policy->freq_table = cbe_freqs;
+ cbe_cpufreq_pmi_policy_init(policy);
+ return 0;
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cbe_cpufreq_pmi_policy_exit(policy);
+ return 0;
+}
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int cbe_pmode_new)
+{
+ pr_debug("setting frequency for cpu %d to %d kHz, " \
+ "1/%d of max frequency\n",
+ policy->cpu,
+ cbe_freqs[cbe_pmode_new].frequency,
+ cbe_freqs[cbe_pmode_new].driver_data);
+
+ return set_pmode(policy->cpu, cbe_pmode_new);
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cbe_cpufreq_target,
+ .init = cbe_cpufreq_cpu_init,
+ .exit = cbe_cpufreq_cpu_exit,
+ .name = "cbe-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+ int ret;
+
+ if (!machine_is(cell))
+ return -ENODEV;
+
+ cbe_cpufreq_pmi_init();
+
+ ret = cpufreq_register_driver(&cbe_cpufreq_driver);
+ if (ret)
+ cbe_cpufreq_pmi_exit();
+
+ return ret;
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cbe_cpufreq_driver);
+ cbe_cpufreq_pmi_exit();
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
new file mode 100644
index 000000000..00cd8633b
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ppc_cbe_cpufreq.h
+ *
+ * This file contains the definitions used by the cbe_cpufreq driver.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
+int cbe_cpufreq_get_pmode(int cpu);
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
+
+#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
+extern bool cbe_cpufreq_has_pmi;
+void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
+void cbe_cpufreq_pmi_init(void);
+void cbe_cpufreq_pmi_exit(void);
+#else
+#define cbe_cpufreq_has_pmi (0)
+static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
+static inline void cbe_cpufreq_pmi_init(void) {}
+static inline void cbe_cpufreq_pmi_exit(void) {}
+#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000..04830cd95
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pervasive backend for the cbe_cpufreq driver
+ *
+ * This driver makes use of the pervasive unit to
+ * engage the desired frequency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+ [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+ 0x0000240000000000ull,
+ 0x0000268000000000ull,
+ 0x000029C000000000ull,
+ 0x00002D0000000000ull,
+ 0x0000300000000000ull,
+ 0x0000334000000000ull,
+ 0x000039C000000000ull,
+ 0x00003FC000000000ull,
+};
+
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ unsigned long flags;
+ u64 value;
+#ifdef DEBUG
+ long time;
+#endif
+
+ local_irq_save(flags);
+
+ mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+
+ out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+ out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+ value = in_be64(&pmd_regs->pmcr);
+ /* set bits to zero */
+ value &= 0xFFFFFFFFFFFFFFF8ull;
+ /* set bits to next pmode */
+ value |= pmode;
+
+ out_be64(&pmd_regs->pmcr, value);
+
+#ifdef DEBUG
+ /* wait until new pmode appears in status register */
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ while (value != pmode) {
+ cpu_relax();
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ }
+
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "pervasive unit\n", time);
+#endif
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+int cbe_cpufreq_get_pmode(int cpu)
+{
+ int ret;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+ ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+ return ret;
+}
+
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
new file mode 100644
index 000000000..4fba3637b
--- /dev/null
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * pmi backend for the cbe_cpufreq driver
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+
+#include <asm/processor.h>
+#include <asm/pmi.h>
+#include <asm/cell-regs.h>
+
+#ifdef DEBUG
+#include <asm/time.h>
+#endif
+
+#include "ppc_cbe_cpufreq.h"
+
+bool cbe_cpufreq_has_pmi = false;
+EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
+
+/*
+ * hardware specific functions
+ */
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
+{
+ int ret;
+ pmi_message_t pmi_msg;
+#ifdef DEBUG
+ long time;
+#endif
+ pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+ pmi_msg.data1 = cbe_cpu_to_node(cpu);
+ pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+ pmi_send_message(pmi_msg);
+
+#ifdef DEBUG
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+ pr_debug("had to wait %lu ms for a transition using " \
+ "PMI\n", time);
+#endif
+ ret = pmi_msg.data2;
+ pr_debug("PMI returned slow mode %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
+
+
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
+{
+ struct cpufreq_policy *policy;
+ struct freq_qos_request *req;
+ u8 node, slow_mode;
+ int cpu, ret;
+
+ BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+ node = pmi_msg.data1;
+ slow_mode = pmi_msg.data2;
+
+ cpu = cbe_node_to_cpu(node);
+
+ pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_warn("cpufreq policy not found cpu%d\n", cpu);
+ return;
+ }
+
+ req = policy->driver_data;
+
+ ret = freq_qos_update_request(req,
+ policy->freq_table[slow_mode].frequency);
+ if (ret < 0)
+ pr_warn("Failed to update freq constraint: %d\n", ret);
+ else
+ pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
+
+ cpufreq_cpu_put(policy);
+}
+
+static struct pmi_handler cbe_pmi_handler = {
+ .type = PMI_TYPE_FREQ_CHANGE,
+ .handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
+
+void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
+{
+ struct freq_qos_request *req;
+ int ret;
+
+ if (!cbe_cpufreq_has_pmi)
+ return;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return;
+
+ ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+ policy->freq_table[0].frequency);
+ if (ret < 0) {
+ pr_err("Failed to add freq constraint (%d)\n", ret);
+ kfree(req);
+ return;
+ }
+
+ policy->driver_data = req;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
+
+void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
+{
+ struct freq_qos_request *req = policy->driver_data;
+
+ if (cbe_cpufreq_has_pmi) {
+ freq_qos_remove_request(req);
+ kfree(req);
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
+
+void cbe_cpufreq_pmi_init(void)
+{
+ if (!pmi_register_handler(&cbe_pmi_handler))
+ cbe_cpufreq_has_pmi = true;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
+
+void cbe_cpufreq_pmi_exit(void)
+{
+ pmi_unregister_handler(&cbe_pmi_handler);
+ cbe_cpufreq_has_pmi = false;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
new file mode 100644
index 000000000..ed1ae061a
--- /dev/null
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2002,2003 Intrinsyc Software
+ *
+ * History:
+ * 31-Jul-2002 : Initial version [FB]
+ * 29-Jan-2003 : added PXA255 support [FB]
+ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
+ *
+ * Note:
+ * This driver may change the memory bus clock rate, but will not do any
+ * platform specific access timing changes... for example if you have flash
+ * memory connected to CS0, you will need to register a platform specific
+ * notifier which will adjust the memory access strobes to maintain a
+ * minimum strobe width.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/io.h>
+
+#ifdef DEBUG
+static unsigned int freq_debug;
+module_param(freq_debug, uint, 0);
+MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
+#else
+#define freq_debug 0
+#endif
+
+static struct regulator *vcc_core;
+
+static unsigned int pxa27x_maxfreq;
+module_param(pxa27x_maxfreq, uint, 0);
+MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz "
+ "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
+
+struct pxa_cpufreq_data {
+ struct clk *clk_core;
+};
+static struct pxa_cpufreq_data pxa_cpufreq_data;
+
+struct pxa_freqs {
+ unsigned int khz;
+ int vmin;
+ int vmax;
+};
+
+/*
+ * PXA255 definitions
+ */
+static const struct pxa_freqs pxa255_run_freqs[] =
+{
+ /* CPU MEMBUS run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {132700, -1, -1}, /* 133, 133, 66, 66 */
+ {199100, -1, -1}, /* 199, 199, 99, 99 */
+ {265400, -1, -1}, /* 265, 265, 133, 66 */
+ {331800, -1, -1}, /* 331, 331, 166, 83 */
+ {398100, -1, -1}, /* 398, 398, 196, 99 */
+};
+
+/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
+static const struct pxa_freqs pxa255_turbo_freqs[] =
+{
+ /* CPU run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {199100, -1, -1}, /* 99, 199, 50, 99 */
+ {298500, -1, -1}, /* 99, 287, 50, 99 */
+ {298600, -1, -1}, /* 199, 287, 99, 99 */
+ {398100, -1, -1}, /* 199, 398, 99, 99 */
+};
+
+#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
+#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
+
+static struct cpufreq_frequency_table
+ pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
+static struct cpufreq_frequency_table
+ pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
+
+static unsigned int pxa255_turbo_table;
+module_param(pxa255_turbo_table, uint, 0);
+MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
+
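+/* pxa27x operating points: frequency in kHz, vcc_core range in microvolts */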
+static struct pxa_freqs pxa27x_freqs[] = {
+ {104000, 900000, 1705000 },
+ {156000, 1000000, 1705000 },
+ {208000, 1180000, 1705000 },
+ {312000, 1250000, 1705000 },
+ {416000, 1350000, 1705000 },
+ {520000, 1450000, 1705000 },
+ {624000, 1550000, 1705000 }
+};
+
+#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
+static struct cpufreq_frequency_table
+ pxa27x_freq_table[NUM_PXA27x_FREQS+1];
+
+#ifdef CONFIG_REGULATOR
+
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
+{
+ int ret = 0;
+ int vmin, vmax;
+
+ if (!cpu_is_pxa27x())
+ return 0;
+
+ vmin = pxa_freq->vmin;
+ vmax = pxa_freq->vmax;
+ if ((vmin == -1) || (vmax == -1))
+ return 0;
+
+ ret = regulator_set_voltage(vcc_core, vmin, vmax);
+ if (ret)
+ pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
+ return ret;
+}
+
+static void pxa_cpufreq_init_voltages(void)
+{
+ vcc_core = regulator_get(NULL, "vcc_core");
+ if (IS_ERR(vcc_core)) {
+ pr_info("Didn't find vcc_core regulator\n");
+ vcc_core = NULL;
+ } else {
+ pr_info("Found vcc_core regulator\n");
+ }
+}
+#else
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
+{
+ return 0;
+}
+
+static void pxa_cpufreq_init_voltages(void) { }
+#endif
+
+static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
+ const struct pxa_freqs **pxa_freqs)
+{
+ if (cpu_is_pxa25x()) {
+ if (!pxa255_turbo_table) {
+ *pxa_freqs = pxa255_run_freqs;
+ *freq_table = pxa255_run_freq_table;
+ } else {
+ *pxa_freqs = pxa255_turbo_freqs;
+ *freq_table = pxa255_turbo_freq_table;
+ }
+ } else if (cpu_is_pxa27x()) {
+ *pxa_freqs = pxa27x_freqs;
+ *freq_table = pxa27x_freq_table;
+ } else {
+ BUG();
+ }
+}
+
+static void pxa27x_guess_max_freq(void)
+{
+ if (!pxa27x_maxfreq) {
+ pxa27x_maxfreq = 416000;
+ pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+ pxa27x_maxfreq);
+ } else {
+ pxa27x_maxfreq *= 1000;
+ }
+}
+
+static unsigned int pxa_cpufreq_get(unsigned int cpu)
+{
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+
+ return (unsigned int) clk_get_rate(data->clk_core) / 1000;
+}
+
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
+{
+ struct cpufreq_frequency_table *pxa_freqs_table;
+ const struct pxa_freqs *pxa_freq_settings;
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+ unsigned int new_freq_cpu;
+ int ret = 0;
+
+ /* Get the current policy */
+ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
+
+ new_freq_cpu = pxa_freq_settings[idx].khz;
+
+ if (freq_debug)
+ pr_debug("Changing CPU frequency from %d Mhz to %d Mhz\n",
+ policy->cur / 1000, new_freq_cpu / 1000);
+
+ if (vcc_core && new_freq_cpu > policy->cur) {
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+ if (ret)
+ return ret;
+ }
+
+ clk_set_rate(data->clk_core, new_freq_cpu * 1000);
+
+ /*
+ * Even if the voltage setting fails, we don't report it, as the
+ * frequency change itself succeeded. A failed voltage reduction is
+ * not critical; only power savings suffer from it.
+ *
+ * Note: if the voltage change fails and an error is returned from
+ * here, a bug (apparently a deadlock) is triggered. Should anybody
+ * find out where, the "return 0" below should become "return ret".
+ */
+ if (vcc_core && new_freq_cpu < policy->cur)
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+
+ return 0;
+}
+
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int i;
+ unsigned int freq;
+ struct cpufreq_frequency_table *pxa255_freq_table;
+ const struct pxa_freqs *pxa255_freqs;
+
+ /* try to guess pxa27x cpu */
+ if (cpu_is_pxa27x())
+ pxa27x_guess_max_freq();
+
+ pxa_cpufreq_init_voltages();
+
+ /* set default policy and cpuinfo */
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+
+ /* Generate the pxa25x run cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
+ pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
+ pxa255_run_freq_table[i].driver_data = i;
+ }
+ pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* Generate the pxa25x turbo cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
+ pxa255_turbo_freq_table[i].frequency =
+ pxa255_turbo_freqs[i].khz;
+ pxa255_turbo_freq_table[i].driver_data = i;
+ }
+ pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ pxa255_turbo_table = !!pxa255_turbo_table;
+
+ /* Generate the pxa27x cpufreq_frequency_table struct */
+ for (i = 0; i < NUM_PXA27x_FREQS; i++) {
+ freq = pxa27x_freqs[i].khz;
+ if (freq > pxa27x_maxfreq)
+ break;
+ pxa27x_freq_table[i].frequency = freq;
+ pxa27x_freq_table[i].driver_data = i;
+ }
+ pxa27x_freq_table[i].driver_data = i;
+ pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ /*
+ * Set the policy's minimum and maximum frequencies from the tables
+ * just constructed. This sets cpuinfo.min_freq/max_freq as well as
+ * policy->min and policy->max.
+ */
+ if (cpu_is_pxa25x()) {
+ find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
+ pr_info("using %s frequency table\n",
+ pxa255_turbo_table ? "turbo" : "run");
+
+ policy->freq_table = pxa255_freq_table;
+ } else if (cpu_is_pxa27x()) {
+ policy->freq_table = pxa27x_freq_table;
+ }
+
+ pr_info("frequency change support initialized\n");
+
+ return 0;
+}
+
+static struct cpufreq_driver pxa_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa_set_target,
+ .init = pxa_cpufreq_init,
+ .get = pxa_cpufreq_get,
+ .name = "PXA2xx",
+ .driver_data = &pxa_cpufreq_data,
+};
+
+static int __init pxa_cpu_init(void)
+{
+ int ret = -ENODEV;
+
+ pxa_cpufreq_data.clk_core = clk_get_sys(NULL, "core");
+ if (IS_ERR(pxa_cpufreq_data.clk_core))
+ return PTR_ERR(pxa_cpufreq_data.clk_core);
+
+ if (cpu_is_pxa25x() || cpu_is_pxa27x())
+ ret = cpufreq_register_driver(&pxa_cpufreq_driver);
+ return ret;
+}
+
+static void __exit pxa_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&pxa_cpufreq_driver);
+}
+
+
+MODULE_AUTHOR("Intrinsyc Software Inc.");
+MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
+MODULE_LICENSE("GPL");
+module_init(pxa_cpu_init);
+module_exit(pxa_cpu_exit);
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
new file mode 100644
index 000000000..4afa48d17
--- /dev/null
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2008 Marvell International Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/soc/pxa/cpu.h>
+#include <linux/clk/pxa.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#define HSS_104M (0)
+#define HSS_156M (1)
+#define HSS_208M (2)
+#define HSS_312M (3)
+
+#define SMCFS_78M (0)
+#define SMCFS_104M (2)
+#define SMCFS_208M (5)
+
+#define SFLFS_104M (0)
+#define SFLFS_156M (1)
+#define SFLFS_208M (2)
+#define SFLFS_312M (3)
+
+#define XSPCLK_156M (0)
+#define XSPCLK_NONE (3)
+
+#define DMCFS_26M (0)
+#define DMCFS_260M (3)
+
+#define ACCR_XPDIS (1 << 31) /* Core PLL Output Disable */
+#define ACCR_SPDIS (1 << 30) /* System PLL Output Disable */
+#define ACCR_D0CS (1 << 26) /* D0 Mode Clock Select */
+#define ACCR_PCCE (1 << 11) /* Power Mode Change Clock Enable */
+#define ACCR_DDR_D0CS (1 << 7) /* DDR SDRAM clock frequency in D0CS (PXA31x only) */
+
+#define ACCR_SMCFS_MASK (0x7 << 23) /* Static Memory Controller Frequency Select */
+#define ACCR_SFLFS_MASK (0x3 << 18) /* Frequency Select for Internal Memory Controller */
+#define ACCR_XSPCLK_MASK (0x3 << 16) /* Core Frequency during Frequency Change */
+#define ACCR_HSS_MASK (0x3 << 14) /* System Bus-Clock Frequency Select */
+#define ACCR_DMCFS_MASK (0x3 << 12) /* Dynamic Memory Controller Clock Frequency Select */
+#define ACCR_XN_MASK (0x7 << 8) /* Core PLL Turbo-Mode-to-Run-Mode Ratio */
+#define ACCR_XL_MASK (0x1f) /* Core PLL Run-Mode-to-Oscillator Ratio */
+
+#define ACCR_SMCFS(x) (((x) & 0x7) << 23)
+#define ACCR_SFLFS(x) (((x) & 0x3) << 18)
+#define ACCR_XSPCLK(x) (((x) & 0x3) << 16)
+#define ACCR_HSS(x) (((x) & 0x3) << 14)
+#define ACCR_DMCFS(x) (((x) & 0x3) << 12)
+#define ACCR_XN(x) (((x) & 0x7) << 8)
+#define ACCR_XL(x) ((x) & 0x1f)
+
+struct pxa3xx_freq_info {
+ unsigned int cpufreq_mhz;
+ unsigned int core_xl : 5;
+ unsigned int core_xn : 3;
+ unsigned int hss : 2;
+ unsigned int dmcfs : 2;
+ unsigned int smcfs : 3;
+ unsigned int sflfs : 2;
+ unsigned int df_clkdiv : 3;
+
+ int vcc_core; /* in mV */
+ int vcc_sram; /* in mV */
+};
+
+#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
+{ \
+ .cpufreq_mhz = cpufreq, \
+ .core_xl = _xl, \
+ .core_xn = _xn, \
+ .hss = HSS_##_hss##M, \
+ .dmcfs = DMCFS_##_dmc##M, \
+ .smcfs = SMCFS_##_smc##M, \
+ .sflfs = SFLFS_##_sfl##M, \
+ .df_clkdiv = _dfi, \
+ .vcc_core = vcore, \
+ .vcc_sram = vsram, \
+}
+
+static struct pxa3xx_freq_info pxa300_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+};
+
+static struct pxa3xx_freq_info pxa320_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+ OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
+};
+
+static unsigned int pxa3xx_freqs_num;
+static struct pxa3xx_freq_info *pxa3xx_freqs;
+static struct cpufreq_frequency_table *pxa3xx_freqs_table;
+
+static int setup_freqs_table(struct cpufreq_policy *policy,
+ struct pxa3xx_freq_info *freqs, int num)
+{
+ struct cpufreq_frequency_table *table;
+ int i;
+
+ table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
+ if (table == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ table[i].driver_data = i;
+ table[i].frequency = freqs[i].cpufreq_mhz * 1000;
+ }
+ table[num].driver_data = i;
+ table[num].frequency = CPUFREQ_TABLE_END;
+
+ pxa3xx_freqs = freqs;
+ pxa3xx_freqs_num = num;
+ pxa3xx_freqs_table = table;
+
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static void __update_core_freq(struct pxa3xx_freq_info *info)
+{
+ u32 mask, disable, enable, xclkcfg;
+
+ mask = ACCR_XN_MASK | ACCR_XL_MASK;
+ disable = mask | ACCR_XSPCLK_MASK;
+ enable = ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+ /* No clock until core PLL is re-locked */
+ enable |= ACCR_XSPCLK(XSPCLK_NONE);
+ xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
+
+ pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask);
+}
+
+static void __update_bus_freq(struct pxa3xx_freq_info *info)
+{
+ u32 mask, disable, enable;
+
+ mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+ ACCR_DMCFS_MASK;
+ disable = mask;
+ enable = ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+ ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+
+ pxa3xx_clk_update_accr(disable, enable, 0, mask);
+}
+
+static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
+{
+ return pxa3xx_get_clk_frequency_khz(0);
+}
+
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct pxa3xx_freq_info *next;
+ unsigned long flags;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ next = &pxa3xx_freqs[index];
+
+ local_irq_save(flags);
+ __update_core_freq(next);
+ __update_bus_freq(next);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret = -EINVAL;
+
+ /* set default policy and cpuinfo */
+ policy->min = policy->cpuinfo.min_freq = 104000;
+ policy->max = policy->cpuinfo.max_freq =
+ (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+
+ if (cpu_is_pxa300() || cpu_is_pxa310())
+ ret = setup_freqs_table(policy, pxa300_freqs,
+ ARRAY_SIZE(pxa300_freqs));
+
+ if (cpu_is_pxa320())
+ ret = setup_freqs_table(policy, pxa320_freqs,
+ ARRAY_SIZE(pxa320_freqs));
+
+ if (ret) {
+ pr_err("failed to setup frequency table\n");
+ return ret;
+ }
+
+ pr_info("CPUFREQ support for PXA3xx initialized\n");
+ return 0;
+}
+
+static struct cpufreq_driver pxa3xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa3xx_cpufreq_set,
+ .init = pxa3xx_cpufreq_init,
+ .get = pxa3xx_cpufreq_get,
+ .name = "pxa3xx-cpufreq",
+};
+
+static int __init cpufreq_init(void)
+{
+ if (cpu_is_pxa3xx())
+ return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
+
+ return 0;
+}
+module_init(cpufreq_init);
+
+static void __exit cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
+}
+module_exit(cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
new file mode 100644
index 000000000..749b60c78
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/units.h>
+
+#define LUT_MAX_ENTRIES 40U
+#define LUT_SRC GENMASK(31, 30)
+#define LUT_L_VAL GENMASK(7, 0)
+#define LUT_CORE_COUNT GENMASK(18, 16)
+#define LUT_VOLT GENMASK(11, 0)
+#define CLK_HW_DIV 2
+#define LUT_TURBO_IND 1
+
+#define GT_IRQ_STATUS BIT(2)
+
+struct qcom_cpufreq_soc_data {
+ u32 reg_enable;
+ u32 reg_domain_state;
+ u32 reg_dcvs_ctrl;
+ u32 reg_freq_lut;
+ u32 reg_volt_lut;
+ u32 reg_intr_clr;
+ u32 reg_current_vote;
+ u32 reg_perf_state;
+ u8 lut_row_size;
+};
+
+struct qcom_cpufreq_data {
+ void __iomem *base;
+ struct resource *res;
+ const struct qcom_cpufreq_soc_data *soc_data;
+
+ /*
+ * Mutex to synchronize between de-init sequence and re-starting LMh
+ * polling/interrupts
+ */
+ struct mutex throttle_lock;
+ int throttle_irq;
+ char irq_name[15];
+ bool cancel_throttle;
+ struct delayed_work throttle_work;
+ struct cpufreq_policy *policy;
+
+ bool per_core_dcvs;
+};
+
+static unsigned long cpu_hw_rate, xo_rate;
+static bool icc_scaling_enabled;
+
+static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
+ unsigned long freq_khz)
+{
+ unsigned long freq_hz = freq_khz * 1000;
+ struct dev_pm_opp *opp;
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
+ return ret;
+}
+
+static int qcom_cpufreq_update_opp(struct device *cpu_dev,
+ unsigned long freq_khz,
+ unsigned long volt)
+{
+ unsigned long freq_hz = freq_khz * 1000;
+ int ret;
+
+ /* Skip voltage update if the opp table is not available */
+ if (!icc_scaling_enabled)
+ return dev_pm_opp_add(cpu_dev, freq_hz, volt);
+
+ ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
+ if (ret) {
+ dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
+ return ret;
+ }
+
+ return dev_pm_opp_enable(cpu_dev, freq_hz);
+}
+
+static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ unsigned long freq = policy->freq_table[index].frequency;
+ unsigned int i;
+
+ writel_relaxed(index, data->base + soc_data->reg_perf_state);
+
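+ /* with per-core DCVS each related CPU has its own perf-state register, 4 bytes apart */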
+ if (data->per_core_dcvs)
+ for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
+ writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
+
+ if (icc_scaling_enabled)
+ qcom_cpufreq_set_bw(policy, freq);
+
+ return 0;
+}
+
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+{
+ unsigned int lval;
+
+ if (data->soc_data->reg_current_vote)
+ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+ else
+ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
+
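+ /* the throttled frequency is the LUT L value scaled by the XO clock rate */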
+ return lval * xo_rate;
+}
+
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+{
+ struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+ soc_data = data->soc_data;
+
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ struct qcom_cpufreq_data *data;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+
+ return qcom_cpufreq_get_freq(cpu);
+}
+
+static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
+ unsigned int index;
+ unsigned int i;
+
+ index = policy->cached_resolved_idx;
+ writel_relaxed(index, data->base + soc_data->reg_perf_state);
+
+ if (data->per_core_dcvs)
+ for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
+ writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+ struct cpufreq_policy *policy)
+{
+ u32 data, src, lval, i, core_count, prev_freq = 0, freq;
+ u32 volt;
+ struct cpufreq_frequency_table *table;
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+ int ret;
+ struct qcom_cpufreq_data *drv_data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
+
+ table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (!ret) {
+ /* Disable all opps and cross-validate against LUT later */
+ icc_scaling_enabled = true;
+ for (rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(cpu_dev, "Invalid opp table in device tree\n");
+ kfree(table);
+ return ret;
+ } else {
+ policy->fast_switch_possible = true;
+ icc_scaling_enabled = false;
+ }
+
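+ /*
+ * Walk the hardware LUT: each row encodes the clock source, the L value
+ * (frequency), the core count and the voltage.
+ */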
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
+ i * soc_data->lut_row_size);
+ src = FIELD_GET(LUT_SRC, data);
+ lval = FIELD_GET(LUT_L_VAL, data);
+ core_count = FIELD_GET(LUT_CORE_COUNT, data);
+
+ data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
+ i * soc_data->lut_row_size);
+ volt = FIELD_GET(LUT_VOLT, data) * 1000;
+
+ if (src)
+ freq = xo_rate * lval / 1000;
+ else
+ freq = cpu_hw_rate / 1000;
+
+ if (freq != prev_freq && core_count != LUT_TURBO_IND) {
+ if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
+ table[i].frequency = freq;
+ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
+ freq, core_count);
+ } else {
+ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ } else if (core_count == LUT_TURBO_IND) {
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ /*
+ * Two identical frequencies with the same core count mark the
+ * end of the table
+ */
+ if (i > 0 && prev_freq == freq) {
+ struct cpufreq_frequency_table *prev = &table[i - 1];
+
+ /*
+ * Only treat the last frequency that might be a boost
+ * as the boost frequency
+ */
+ if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+ if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
+ prev->frequency = prev_freq;
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ } else {
+ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
+ freq);
+ }
+ }
+
+ break;
+ }
+
+ prev_freq = freq;
+ }
+
+ table[i].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ return 0;
+}
+
+static void qcom_get_related_cpus(int index, struct cpumask *m)
+{
+ struct device_node *cpu_np;
+ struct of_phandle_args args;
+ int cpu, ret;
+
+ for_each_possible_cpu(cpu) {
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ continue;
+
+ ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+ "#freq-domain-cells", 0,
+ &args);
+ of_node_put(cpu_np);
+ if (ret < 0)
+ continue;
+
+ if (index == args.args[0])
+ cpumask_set_cpu(cpu, m);
+ }
+}
+
+static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+{
+ struct cpufreq_policy *policy = data->policy;
+ int cpu = cpumask_first(policy->related_cpus);
+ struct device *dev = get_cpu_device(cpu);
+ unsigned long freq_hz, throttled_freq;
+ struct dev_pm_opp *opp;
+
+ /*
+ * Get the h/w throttled frequency, normalize it using the
+ * registered opp table and use it to calculate thermal pressure.
+ */
+ freq_hz = qcom_lmh_get_throttle_freq(data);
+
+ opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
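+ /* if the throttled rate is below the lowest OPP, fall back to the nearest higher one */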
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+ if (IS_ERR(opp)) {
+ dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
+ } else {
+ dev_pm_opp_put(opp);
+ }
+
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+
+ /*
+ * In the unlikely case the policy is unregistered, do not enable
+ * polling or the h/w interrupt
+ */
+ mutex_lock(&data->throttle_lock);
+ if (data->cancel_throttle)
+ goto out;
+
+ /*
+ * If the h/w throttled frequency is at least what cpufreq has
+ * requested, stop polling and switch back to the interrupt mechanism.
+ */
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ enable_irq(data->throttle_irq);
+ else
+ mod_delayed_work(system_highpri_wq, &data->throttle_work,
+ msecs_to_jiffies(10));
+
+out:
+ mutex_unlock(&data->throttle_lock);
+}
+
+static void qcom_lmh_dcvs_poll(struct work_struct *work)
+{
+ struct qcom_cpufreq_data *data;
+
+ data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
+ qcom_lmh_dcvs_notify(data);
+}
+
+static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
+{
+ struct qcom_cpufreq_data *c_data = data;
+
+ /* Disable interrupt and enable polling */
+ disable_irq_nosync(c_data->throttle_irq);
+ schedule_delayed_work(&c_data->throttle_work, 0);
+
+ if (c_data->soc_data->reg_intr_clr)
+ writel_relaxed(GT_IRQ_STATUS,
+ c_data->base + c_data->soc_data->reg_intr_clr);
+
+ return IRQ_HANDLED;
+}
+
+static const struct qcom_cpufreq_soc_data qcom_soc_data = {
+ .reg_enable = 0x0,
+ .reg_dcvs_ctrl = 0xbc,
+ .reg_freq_lut = 0x110,
+ .reg_volt_lut = 0x114,
+ .reg_current_vote = 0x704,
+ .reg_perf_state = 0x920,
+ .lut_row_size = 32,
+};
+
+static const struct qcom_cpufreq_soc_data epss_soc_data = {
+ .reg_enable = 0x0,
+ .reg_domain_state = 0x20,
+ .reg_dcvs_ctrl = 0xb0,
+ .reg_freq_lut = 0x100,
+ .reg_volt_lut = 0x200,
+ .reg_intr_clr = 0x308,
+ .reg_perf_state = 0x320,
+ .lut_row_size = 4,
+};
+
+static const struct of_device_id qcom_cpufreq_hw_match[] = {
+ { .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
+ { .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+
+static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int ret;
+
+ /*
+ * Look for the LMh interrupt. If no interrupt line is specified,
+ * allow cpufreq to be enabled as usual; any other error is propagated.
+ */
+ data->throttle_irq = platform_get_irq_optional(pdev, index);
+ if (data->throttle_irq == -ENXIO)
+ return 0;
+ if (data->throttle_irq < 0)
+ return data->throttle_irq;
+
+ data->cancel_throttle = false;
+ data->policy = policy;
+
+ mutex_init(&data->throttle_lock);
+ INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
+
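+ /* the IRQ is requested with IRQF_NO_AUTOEN and only enabled from the cpufreq ready callback */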
+ snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
+ ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
+ return 0;
+ }
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+ data->irq_name, data->throttle_irq);
+
+ return 0;
+}
+
+static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int ret;
+
+ if (data->throttle_irq <= 0)
+ return 0;
+
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = false;
+ mutex_unlock(&data->throttle_lock);
+
+ ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+ data->irq_name, data->throttle_irq);
+
+ return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
+ if (data->throttle_irq <= 0)
+ return 0;
+
+ mutex_lock(&data->throttle_lock);
+ data->cancel_throttle = true;
+ mutex_unlock(&data->throttle_lock);
+
+ cancel_delayed_work_sync(&data->throttle_work);
+ irq_set_affinity_and_hint(data->throttle_irq, NULL);
+ disable_irq_nosync(data->throttle_irq);
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
+ if (data->throttle_irq <= 0)
+ return;
+
+ free_irq(data->throttle_irq, data);
+}
+
+static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
+ struct of_phandle_args args;
+ struct device_node *cpu_np;
+ struct device *cpu_dev;
+ struct resource *res;
+ void __iomem *base;
+ struct qcom_cpufreq_data *data;
+ int ret, index;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ cpu_np = of_cpu_device_node_get(policy->cpu);
+ if (!cpu_np)
+ return -EINVAL;
+
+ ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+ "#freq-domain-cells", 0, &args);
+ of_node_put(cpu_np);
+ if (ret)
+ return ret;
+
+ index = args.args[0];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (!res) {
+ dev_err(dev, "failed to get mem resource %d\n", index);
+ return -ENODEV;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "failed to request resource %pR\n", res);
+ return -EBUSY;
+ }
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "failed to map resource %pR\n", res);
+ ret = -ENOMEM;
+ goto release_region;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto unmap_base;
+ }
+
+ data->soc_data = of_device_get_match_data(&pdev->dev);
+ data->base = base;
+ data->res = res;
+
+ /* HW should be in enabled state to proceed */
+ if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+ dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
+ data->per_core_dcvs = true;
+
+ qcom_get_related_cpus(index, policy->cpus);
+ if (cpumask_empty(policy->cpus)) {
+ dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
+ ret = -ENOENT;
+ goto error;
+ }
+
+ policy->driver_data = data;
+ policy->dvfs_possible_from_any_cpu = true;
+
+ ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
+ if (ret) {
+ dev_err(dev, "Domain-%d failed to read LUT\n", index);
+ goto error;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret)
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ }
+
+ ret = qcom_cpufreq_hw_lmh_init(policy, index);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ kfree(data);
+unmap_base:
+ iounmap(base);
+release_region:
+ release_mem_region(res->start, resource_size(res));
+ return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct resource *res = data->res;
+ void __iomem *base = data->base;
+
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ qcom_cpufreq_hw_lmh_exit(data);
+ kfree(policy->freq_table);
+ kfree(data);
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ enable_irq(data->throttle_irq);
+}
+
+static struct freq_attr *qcom_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_scaling_boost_freqs,
+ NULL
+};
+
+static struct cpufreq_driver cpufreq_qcom_hw_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = qcom_cpufreq_hw_target_index,
+ .get = qcom_cpufreq_hw_get,
+ .init = qcom_cpufreq_hw_cpu_init,
+ .exit = qcom_cpufreq_hw_cpu_exit,
+ .online = qcom_cpufreq_hw_cpu_online,
+ .offline = qcom_cpufreq_hw_cpu_offline,
+ .register_em = cpufreq_register_em_with_opp,
+ .fast_switch = qcom_cpufreq_hw_fast_switch,
+ .name = "qcom-cpufreq-hw",
+ .attr = qcom_cpufreq_hw_attr,
+ .ready = qcom_cpufreq_ready,
+};
+
+static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ xo_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ clk = clk_get(&pdev->dev, "alternate");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
+ clk_put(clk);
+
+ cpufreq_qcom_hw_driver.driver_data = pdev;
+
+ /* Check for optional interconnect paths on CPU0 */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -EPROBE_DEFER;
+
+ ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (ret)
+ return ret;
+
+ ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+ else
+ dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+
+ return ret;
+}
+
+static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+}
+
+static struct platform_driver qcom_cpufreq_hw_driver = {
+ .probe = qcom_cpufreq_hw_driver_probe,
+ .remove = qcom_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "qcom-cpufreq-hw",
+ .of_match_table = qcom_cpufreq_hw_match,
+ },
+};
+
+static int __init qcom_cpufreq_hw_init(void)
+{
+ return platform_driver_register(&qcom_cpufreq_hw_driver);
+}
+postcore_initcall(qcom_cpufreq_hw_init);
+
+static void __exit qcom_cpufreq_hw_exit(void)
+{
+ platform_driver_unregister(&qcom_cpufreq_hw_driver);
+}
+module_exit(qcom_cpufreq_hw_exit);
+
+MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
new file mode 100644
index 000000000..a577586b2
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * In certain Qualcomm SoCs like apq8096 and msm8996 that have KRYO processors,
+ * the CPU frequency subset and the voltage value of each OPP vary
+ * based on the silicon variant in use. The Qualcomm Process Voltage Scaling
+ * Tables define the voltage and frequency values based on the msm-id in SMEM
+ * and the speedbin blown into the efuses.
+ * The qcom-cpufreq-nvmem driver reads the msm-id and efuse value from the SoC
+ * to provide the OPP framework with the required information.
+ * This is used to determine the voltage and frequency value of each OPP in the
+ * operating-points-v2 table when it is parsed by the OPP framework.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+#define MSM_ID_SMEM 137
+
+enum _msm_id {
+ MSM8996V3 = 0xF6ul,
+ APQ8096V3 = 0x123ul,
+ MSM8996SG = 0x131ul,
+ APQ8096SG = 0x138ul,
+};
+
+enum _msm8996_version {
+ MSM8996_V3,
+ MSM8996_SG,
+ NUM_OF_MSM8996_VERSIONS,
+};
+
+struct qcom_cpufreq_drv;
+
+struct qcom_cpufreq_match_data {
+ int (*get_version)(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv);
+ const char **genpd_names;
+};
+
+struct qcom_cpufreq_drv {
+ int *opp_tokens;
+ u32 versions;
+ const struct qcom_cpufreq_match_data *data;
+};
+
+static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+
+static void get_krait_bin_format_a(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ u8 *buf)
+{
+ u32 pte_efuse;
+
+ pte_efuse = *((u32 *)buf);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ dev_warn(cpu_dev, "Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ dev_warn(cpu_dev, "PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ }
+}
+
+static void get_krait_bin_format_b(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ u8 *buf)
+{
+ u32 pte_efuse, redundant_sel;
+
+ pte_efuse = *((u32 *)buf);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
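+ /* redundant_sel selects which copy of the fuse bits is decoded for speed and PVS below */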
+ switch (redundant_sel) {
+ case 1:
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ *speed = pte_efuse & 0x7;
+ break;
+ default:
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = pte_efuse & 0x7;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ } else {
+ dev_warn(cpu_dev, "Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = *(((u32 *)buf) + 1);
+ pte_efuse &= BIT(21);
+ if (pte_efuse) {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ } else {
+ dev_warn(cpu_dev, "PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ dev_dbg(cpu_dev, "PVS version: %d\n", *pvs_ver);
+}
+
+static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
+{
+ size_t len;
+ u32 *msm_id;
+ enum _msm8996_version version;
+
+ msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
+ if (IS_ERR(msm_id))
+ return NUM_OF_MSM8996_VERSIONS;
+
+ /* The first 4 bytes hold the format; the next 4 bytes hold the actual msm-id */
+ msm_id++;
+
+ switch ((enum _msm_id)*msm_id) {
+ case MSM8996V3:
+ case APQ8096V3:
+ version = MSM8996_V3;
+ break;
+ case MSM8996SG:
+ case APQ8096SG:
+ version = MSM8996_SG;
+ break;
+ default:
+ version = NUM_OF_MSM8996_VERSIONS;
+ }
+
+ return version;
+}
+
+static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ size_t len;
+ u8 *speedbin;
+ enum _msm8996_version msm8996_version;
+ *pvs_name = NULL;
+
+ msm8996_version = qcom_cpufreq_get_msm_id();
+ if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
+ dev_err(cpu_dev, "Not Snapdragon 820/821!");
+ return -ENODEV;
+ }
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (msm8996_version) {
+ case MSM8996_V3:
+ drv->versions = 1 << (unsigned int)(*speedbin);
+ break;
+ case MSM8996_SG:
+ drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ kfree(speedbin);
+ return 0;
+}
+
+static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ int speed = 0, pvs = 0, pvs_ver = 0;
+ u8 *speedbin;
+ size_t len;
+ int ret = 0;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (len) {
+ case 4:
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin);
+ break;
+ case 8:
+ get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin);
+ break;
+ default:
+ dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
+ ret = -ENODEV;
+ goto len_error;
+ }
+
+ snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
+ speed, pvs, pvs_ver);
+
+ drv->versions = (1 << speed);
+
+len_error:
+ kfree(speedbin);
+ return ret;
+}
+
+static const struct qcom_cpufreq_match_data match_data_kryo = {
+ .get_version = qcom_cpufreq_kryo_name_version,
+};
+
+static const struct qcom_cpufreq_match_data match_data_krait = {
+ .get_version = qcom_cpufreq_krait_name_version,
+};
+
+static const char *qcs404_genpd_names[] = { "cpr", NULL };
+
+static const struct qcom_cpufreq_match_data match_data_qcs404 = {
+ .genpd_names = qcs404_genpd_names,
+};
+
+static int qcom_cpufreq_probe(struct platform_device *pdev)
+{
+ struct qcom_cpufreq_drv *drv;
+ struct nvmem_cell *speedbin_nvmem;
+ struct device_node *np;
+ struct device *cpu_dev;
+ char pvs_name_buffer[] = "speedXX-pvsXX-vXX";
+ char *pvs_name = pvs_name_buffer;
+ unsigned cpu;
+ const struct of_device_id *match;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return -ENOENT;
+
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ if (!ret) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ match = pdev->dev.platform_data;
+ drv->data = match->data;
+ if (!drv->data) {
+ ret = -ENODEV;
+ goto free_drv;
+ }
+
+ if (drv->data->get_version) {
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ if (IS_ERR(speedbin_nvmem)) {
+ ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+ "Could not get nvmem cell\n");
+ goto free_drv;
+ }
+
+ ret = drv->data->get_version(cpu_dev,
+ speedbin_nvmem, &pvs_name, drv);
+ if (ret) {
+ nvmem_cell_put(speedbin_nvmem);
+ goto free_drv;
+ }
+ nvmem_cell_put(speedbin_nvmem);
+ }
+ of_node_put(np);
+
+ drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
+ GFP_KERNEL);
+ if (!drv->opp_tokens) {
+ ret = -ENOMEM;
+ goto free_drv;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct dev_pm_opp_config config = {
+ .supported_hw = NULL,
+ };
+
+ cpu_dev = get_cpu_device(cpu);
+ if (NULL == cpu_dev) {
+ ret = -ENODEV;
+ goto free_opp;
+ }
+
+ if (drv->data->get_version) {
+ config.supported_hw = &drv->versions;
+ config.supported_hw_count = 1;
+
+ if (pvs_name)
+ config.prop_name = pvs_name;
+ }
+
+ if (drv->data->genpd_names) {
+ config.genpd_names = drv->data->genpd_names;
+ config.virt_devs = NULL;
+ }
+
+ if (config.supported_hw || config.genpd_names) {
+ drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
+ if (drv->opp_tokens[cpu] < 0) {
+ ret = drv->opp_tokens[cpu];
+ dev_err(cpu_dev, "Failed to set OPP config\n");
+ goto free_opp;
+ }
+ }
+ }
+
+ cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, drv);
+ return 0;
+ }
+
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ dev_err(cpu_dev, "Failed to register platform device\n");
+
+free_opp:
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+ kfree(drv->opp_tokens);
+free_drv:
+ kfree(drv);
+
+ return ret;
+}
+
+static int qcom_cpufreq_remove(struct platform_device *pdev)
+{
+ struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
+ platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
+
+ kfree(drv->opp_tokens);
+ kfree(drv);
+
+ return 0;
+}
+
+static struct platform_driver qcom_cpufreq_driver = {
+ .probe = qcom_cpufreq_probe,
+ .remove = qcom_cpufreq_remove,
+ .driver = {
+ .name = "qcom-cpufreq-nvmem",
+ },
+};
+
+static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
+ { .compatible = "qcom,apq8096", .data = &match_data_kryo },
+ { .compatible = "qcom,msm8996", .data = &match_data_kryo },
+ { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+ { .compatible = "qcom,ipq8064", .data = &match_data_krait },
+ { .compatible = "qcom,apq8064", .data = &match_data_krait },
+ { .compatible = "qcom,msm8974", .data = &match_data_krait },
+ { .compatible = "qcom,msm8960", .data = &match_data_krait },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
+
+/*
+ * Since the driver depends on smem and nvmem drivers, which may
+ * return EPROBE_DEFER, all the real activity is done in the probe,
+ * which may be deferred as well. The init here only registers
+ * the driver and the platform device.
+ */
+static int __init qcom_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(qcom_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&qcom_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "qcom-cpufreq-nvmem",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (0 == ret)
+ return 0;
+
+ platform_driver_unregister(&qcom_cpufreq_driver);
+ return ret;
+}
+module_init(qcom_cpufreq_init);
+
+static void __exit qcom_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&qcom_cpufreq_driver);
+}
+module_exit(qcom_cpufreq_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. CPUfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
new file mode 100644
index 000000000..573b417e1
--- /dev/null
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * CPU Frequency Scaling driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpufreq.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct cpu_data
+ * @pclk: the parent clock of cpu
+ * @table: frequency table
+ */
+struct cpu_data {
+ struct clk **pclk;
+ struct cpufreq_frequency_table *table;
+};
+
+/**
+ * struct soc_data - SoC specific data
+ * @flags: SOC_xxx
+ */
+struct soc_data {
+ u32 flags;
+};
+
+static u32 get_bus_freq(void)
+{
+ struct device_node *soc;
+ u32 sysfreq;
+ struct clk *pltclk;
+ int ret;
+
+ /* get platform freq by searching bus-frequency property */
+ soc = of_find_node_by_type(NULL, "soc");
+ if (soc) {
+ ret = of_property_read_u32(soc, "bus-frequency", &sysfreq);
+ of_node_put(soc);
+ if (!ret)
+ return sysfreq;
+ }
+
+ /* get platform freq by its clock name */
+ pltclk = clk_get(NULL, "cg-pll0-div1");
+ if (IS_ERR(pltclk)) {
+ pr_err("%s: can't get bus frequency %ld\n",
+ __func__, PTR_ERR(pltclk));
+ return PTR_ERR(pltclk);
+ }
+
+ return clk_get_rate(pltclk);
+}
+
+static struct clk *cpu_to_clk(int cpu)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ if (!cpu_present(cpu))
+ return NULL;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return NULL;
+
+ clk = of_clk_get(np, 0);
+ of_node_put(np);
+ return clk;
+}
+
+/* traverse the cpu nodes to build the mask of cpus sharing the same clock wire */
+static void set_affected_cpus(struct cpufreq_policy *policy)
+{
+ struct cpumask *dstp = policy->cpus;
+ struct clk *clk;
+ int i;
+
+ for_each_present_cpu(i) {
+ clk = cpu_to_clk(i);
+ if (IS_ERR(clk)) {
+ pr_err("%s: no clock for cpu %d\n", __func__, i);
+ continue;
+ }
+
+ if (clk_is_match(policy->clk, clk))
+ cpumask_set_cpu(i, dstp);
+ }
+}
+
+/* mark duplicated frequencies in the frequency table as invalid */
+static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j;
+
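+	/* mark entry i invalid if an earlier valid entry already has the same frequency */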
+ for (i = 1; i < count; i++) {
+ for (j = 0; j < i; j++) {
+ if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID ||
+ freq_table[j].frequency !=
+ freq_table[i].frequency)
+ continue;
+
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ break;
+ }
+ }
+}
+
+/* sort the frequencies in the frequency table in descending order */
+static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j, ind;
+ unsigned int freq, max_freq;
+ struct cpufreq_frequency_table table;
+
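+	/* selection sort: move the largest remaining valid frequency into slot i */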
+ for (i = 0; i < count - 1; i++) {
+ max_freq = freq_table[i].frequency;
+ ind = i;
+ for (j = i + 1; j < count; j++) {
+ freq = freq_table[j].frequency;
+ if (freq == CPUFREQ_ENTRY_INVALID ||
+ freq <= max_freq)
+ continue;
+ ind = j;
+ max_freq = freq;
+ }
+
+ if (ind != i) {
+ /* exchange the frequencies */
+ table.driver_data = freq_table[i].driver_data;
+ table.frequency = freq_table[i].frequency;
+ freq_table[i].driver_data = freq_table[ind].driver_data;
+ freq_table[i].frequency = freq_table[ind].frequency;
+ freq_table[ind].driver_data = table.driver_data;
+ freq_table[ind].frequency = table.frequency;
+ }
+ }
+}
+
+static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device_node *np;
+ int i, count;
+ u32 freq;
+ struct clk *clk;
+ const struct clk_hw *hwclk;
+ struct cpufreq_frequency_table *table;
+ struct cpu_data *data;
+ unsigned int cpu = policy->cpu;
+ u64 u64temp;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto err_np;
+
+ policy->clk = of_clk_get(np, 0);
+ if (IS_ERR(policy->clk)) {
+ pr_err("%s: no clock information\n", __func__);
+ goto err_nomem2;
+ }
+
+ hwclk = __clk_get_hw(policy->clk);
+ count = clk_hw_get_num_parents(hwclk);
+
+ data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->pclk)
+ goto err_nomem2;
+
+ table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ goto err_pclk;
+
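+	/* build one table entry per parent clock; driver_data records the parent index */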
+ for (i = 0; i < count; i++) {
+ clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
+ data->pclk[i] = clk;
+ freq = clk_get_rate(clk);
+ table[i].frequency = freq / 1000;
+ table[i].driver_data = i;
+ }
+ freq_table_redup(table, count);
+ freq_table_sort(table, count);
+ table[i].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+ data->table = table;
+
+	/* update ->cpus if we have a cluster; no harm if not */
+ set_affected_cpus(policy);
+ policy->driver_data = data;
+
+ /* Minimum transition latency is 12 platform clocks */
+ u64temp = 12ULL * NSEC_PER_SEC;
+ do_div(u64temp, get_bus_freq());
+ policy->cpuinfo.transition_latency = u64temp + 1;
+
+ of_node_put(np);
+
+ return 0;
+
+err_pclk:
+ kfree(data->pclk);
+err_nomem2:
+ kfree(data);
+err_np:
+ of_node_put(np);
+
+ return -ENODEV;
+}
+
+static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct cpu_data *data = policy->driver_data;
+
+ kfree(data->pclk);
+ kfree(data->table);
+ kfree(data);
+ policy->driver_data = NULL;
+
+ return 0;
+}
+
+static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct clk *parent;
+ struct cpu_data *data = policy->driver_data;
+
+ parent = data->pclk[data->table[index].driver_data];
+ return clk_set_parent(policy->clk, parent);
+}
+
+static struct cpufreq_driver qoriq_cpufreq_driver = {
+ .name = "qoriq_cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS |
+ CPUFREQ_IS_COOLING_DEV,
+ .init = qoriq_cpufreq_cpu_init,
+ .exit = qoriq_cpufreq_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = qoriq_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct of_device_id qoriq_cpufreq_blacklist[] = {
+ /* e6500 cannot use cpufreq due to erratum A-008083 */
+ { .compatible = "fsl,b4420-clockgen", },
+ { .compatible = "fsl,b4860-clockgen", },
+ { .compatible = "fsl,t2080-clockgen", },
+ { .compatible = "fsl,t4240-clockgen", },
+ {}
+};
+
+static int qoriq_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist);
+ if (np) {
+ of_node_put(np);
+		dev_info(&pdev->dev, "Disabling due to erratum A-008083\n");
+ return -ENODEV;
+ }
+
+ ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Freescale QorIQ CPU frequency scaling driver\n");
+ return 0;
+}
+
+static int qoriq_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&qoriq_cpufreq_driver);
+
+ return 0;
+}
+
+static struct platform_driver qoriq_cpufreq_platform_driver = {
+ .driver = {
+ .name = "qoriq-cpufreq",
+ },
+ .probe = qoriq_cpufreq_probe,
+ .remove = qoriq_cpufreq_remove,
+};
+module_platform_driver(qoriq_cpufreq_platform_driver);
+
+MODULE_ALIAS("platform:qoriq-cpufreq");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
+MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
new file mode 100644
index 000000000..2bc7d9734
--- /dev/null
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Raspberry Pi cpufreq driver
+ *
+ * Copyright (C) 2019, Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#define RASPBERRYPI_FREQ_INTERVAL 100000000
+
+static struct platform_device *cpufreq_dt;
+
+static int raspberrypi_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+ unsigned long min, max;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU for cpufreq driver\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /*
+ * The max and min frequencies are configurable in the Raspberry Pi
+ * firmware, so we query them at runtime.
+ */
+ min = roundup(clk_round_rate(clk, 0), RASPBERRYPI_FREQ_INTERVAL);
+ max = roundup(clk_round_rate(clk, ULONG_MAX), RASPBERRYPI_FREQ_INTERVAL);
+ clk_put(clk);
+
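+	/* register an OPP every 100MHz between the firmware minimum and maximum rates */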
+ for (rate = min; rate <= max; rate += RASPBERRYPI_FREQ_INTERVAL) {
+ ret = dev_pm_opp_add(cpu_dev, rate, 0);
+ if (ret)
+ goto remove_opp;
+ }
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(cpu_dev, "Failed to create platform device, %d\n", ret);
+ goto remove_opp;
+ }
+
+ return 0;
+
+remove_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+ return ret;
+}
+
+static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(0);
+ if (cpu_dev)
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+ platform_device_unregister(cpufreq_dt);
+
+ return 0;
+}
+
+/*
+ * Since the driver depends on clk-raspberrypi, which may return EPROBE_DEFER,
+ * all the activity is performed in the probe, which may be deferred as well.
+ */
+static struct platform_driver raspberrypi_cpufreq_driver = {
+ .driver = {
+ .name = "raspberrypi-cpufreq",
+ },
+ .probe = raspberrypi_cpufreq_probe,
+ .remove = raspberrypi_cpufreq_remove,
+};
+module_platform_driver(raspberrypi_cpufreq_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Raspberry Pi cpufreq driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:raspberrypi-cpufreq");
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
new file mode 100644
index 000000000..5dcfbf0bf
--- /dev/null
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 CPU Frequency scaling
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#define S3C2410_CLKDIVN_PDIVN (1<<0)
+#define S3C2410_CLKDIVN_HDIVN (1<<1)
+
+/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
+
+static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ u32 clkdiv = 0;
+
+ if (cfg->divs.h_divisor == 2)
+ clkdiv |= S3C2410_CLKDIVN_HDIVN;
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2410_CLKDIVN_PDIVN;
+
+ s3c24xx_write_clkdivn(clkdiv);
+}
+
+static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long hclk, fclk, pclk;
+ unsigned int hdiv, pdiv;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ hclk_max = cfg->max.hclk;
+
+ cfg->freq.armclk = fclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
+ __func__, fclk, hclk_max);
+
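+	/* only hclk dividers of 1 or 2 are used here; pick 2 when fclk
+	 * exceeds the hclk limit. */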
+ hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
+ hclk = fclk / hdiv;
+
+ if (hclk > cfg->max.hclk) {
+ s3c_freq_dbg("%s: hclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+ pclk = hclk / pdiv;
+
+ if (pclk > cfg->max.pclk) {
+ s3c_freq_dbg("%s: pclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv *= hdiv;
+
+ /* record the result */
+ cfg->divs.p_divisor = pdiv;
+ cfg->divs.h_divisor = hdiv;
+
+ return 0;
+}
+
+static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ /* transition latency is about 5ms worst-case, so
+ * set 10ms to be sure */
+ .latency = 10000000,
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 12,
+
+ .need_pll = 1,
+
+ .name = "s3c2410",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+
+ .set_fvco = s3c2410_set_fvco,
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2410_cpufreq_setdivs,
+ .calc_divs = s3c2410_cpufreq_calcdivs,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2410_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ return s3c_cpufreq_register(&s3c2410_cpufreq_info);
+}
+
+static struct subsys_interface s3c2410_cpufreq_interface = {
+ .name = "s3c2410_cpufreq",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_cpufreq_add,
+};
+
+static int __init s3c2410_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410_cpufreq_interface);
+}
+arch_initcall(s3c2410_cpufreq_init);
+
+static int s3c2410a_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ /* alter the maximum freq settings for S3C2410A. If a board knows
+	 * it only has a maximum of 200MHz, then it should register its own
+ * limits. */
+
+ s3c2410_cpufreq_info.max.fclk = 266000000;
+ s3c2410_cpufreq_info.max.hclk = 133000000;
+ s3c2410_cpufreq_info.max.pclk = 66500000;
+ s3c2410_cpufreq_info.name = "s3c2410a";
+
+ return s3c2410_cpufreq_add(dev, sif);
+}
+
+static struct subsys_interface s3c2410a_cpufreq_interface = {
+ .name = "s3c2410a_cpufreq",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410a_cpufreq_add,
+};
+
+static int __init s3c2410a_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410a_cpufreq_interface);
+}
+arch_initcall(s3c2410a_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
new file mode 100644
index 000000000..5945945ea
--- /dev/null
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2412 CPU Frequency scaling
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#define S3C2412_CLKDIVN_PDIVN (1<<2)
+#define S3C2412_CLKDIVN_HDIVN_MASK (3<<0)
+#define S3C2412_CLKDIVN_ARMDIVN (1<<3)
+#define S3C2412_CLKDIVN_DVSEN (1<<4)
+#define S3C2412_CLKDIVN_HALFHCLK (1<<5)
+#define S3C2412_CLKDIVN_USB48DIV (1<<6)
+#define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8)
+#define S3C2412_CLKDIVN_UARTDIV_SHIFT (8)
+#define S3C2412_CLKDIVN_I2SDIV_MASK (15<<12)
+#define S3C2412_CLKDIVN_I2SDIV_SHIFT (12)
+#define S3C2412_CLKDIVN_CAMDIV_MASK (15<<16)
+#define S3C2412_CLKDIVN_CAMDIV_SHIFT (16)
+
+/* our clock resources. */
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv, armdiv, dvs;
+ unsigned long hclk, fclk, armclk, armdiv_clk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+	/* We can't run hclk above armclk, since at best armclk and hclk
+	 * are equal when in dvs mode. */
+
+ if (hclk_max > armclk)
+ hclk_max = armclk;
+
+ s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
+ __func__, fclk, armclk, hclk_max);
+ s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
+ __func__, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->freq.pclk);
+
+ armdiv = fclk / armclk;
+
+ if (armdiv < 1)
+ armdiv = 1;
+ if (armdiv > 2)
+ armdiv = 2;
+
+ cfg->divs.arm_divisor = armdiv;
+ armdiv_clk = fclk / armdiv;
+
+ hdiv = armdiv_clk / hclk_max;
+ if (hdiv < 1)
+ hdiv = 1;
+
+ cfg->freq.hclk = hclk = armdiv_clk / hdiv;
+
+ /* set dvs depending on whether we reached armclk or not. */
+ cfg->divs.dvs = dvs = armclk < armdiv_clk;
+
+ /* update the actual armclk we achieved. */
+ cfg->freq.armclk = dvs ? hclk : armdiv_clk;
+
+ s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
+ __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
+
+ if (hdiv > 4)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ cfg->freq.pclk = hclk / pdiv;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv * armdiv;
+ cfg->divs.p_divisor = pdiv * armdiv;
+
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv;
+ unsigned long olddiv;
+
+ olddiv = clkdiv = s3c24xx_read_clkdivn();
+
+ /* clear off current clock info */
+
+ clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
+ clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
+ clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
+
+ if (cfg->divs.arm_divisor == 2)
+ clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
+
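+	/* HDIVN holds the hclk divider relative to the armdiv output, minus one */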
+ clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2412_CLKDIVN_PDIVN;
+
+ s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
+ s3c24xx_write_clkdivn(clkdiv);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+/* set the default cpu frequency information, based on a 200MHz part
+ * as we have no other way of detecting the speed rating in software.
+ */
+
+static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ .latency = 5000000, /* 5ms */
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 16,
+
+ .name = "s3c2412",
+ .set_refresh = s3c2412_cpufreq_setrefresh,
+ .set_divs = s3c2412_cpufreq_setdivs,
+ .calc_divs = s3c2412_cpufreq_calcdivs,
+
+ .calc_iotiming = s3c2412_iotiming_calc,
+ .set_iotiming = s3c2412_iotiming_set,
+ .get_iotiming = s3c2412_iotiming_get,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
+};
+
+static int s3c2412_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned long fclk_rate;
+
+ hclk = clk_get(NULL, "hclk");
+ if (IS_ERR(hclk)) {
+ pr_err("cannot find hclk clock\n");
+ return -ENOENT;
+ }
+
+ fclk = clk_get(NULL, "fclk");
+ if (IS_ERR(fclk)) {
+ pr_err("cannot find fclk clock\n");
+ goto err_fclk;
+ }
+
+ fclk_rate = clk_get_rate(fclk);
+ if (fclk_rate > 200000000) {
+ pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
+ fclk_rate / 1000000);
+ s3c2412_cpufreq_info.max.fclk = 266000000;
+ s3c2412_cpufreq_info.max.hclk = 133000000;
+ s3c2412_cpufreq_info.max.pclk = 66000000;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ pr_err("cannot find arm clock\n");
+ goto err_armclk;
+ }
+
+ xtal = clk_get(NULL, "xtal");
+ if (IS_ERR(xtal)) {
+ pr_err("cannot find xtal clock\n");
+ goto err_xtal;
+ }
+
+ return s3c_cpufreq_register(&s3c2412_cpufreq_info);
+
+err_xtal:
+ clk_put(armclk);
+err_armclk:
+ clk_put(fclk);
+err_fclk:
+ clk_put(hclk);
+
+ return -ENOENT;
+}
+
+static struct subsys_interface s3c2412_cpufreq_interface = {
+ .name = "s3c2412_cpufreq",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_cpufreq_add,
+};
+
+static int s3c2412_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2412_cpufreq_interface);
+}
+arch_initcall(s3c2412_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
new file mode 100644
index 000000000..5c221bc90
--- /dev/null
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * S3C2416/2450 CPUfreq Support
+ *
+ * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on s3c64xx_cpufreq.c
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+
+static DEFINE_MUTEX(cpufreq_lock);
+
+struct s3c2416_data {
+ struct clk *armdiv;
+ struct clk *armclk;
+ struct clk *hclk;
+
+ unsigned long regulator_latency;
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct regulator *vddarm;
+#endif
+
+ struct cpufreq_frequency_table *freq_table;
+
+ bool is_dvs;
+ bool disable_dvs;
+};
+
+static struct s3c2416_data s3c2416_cpufreq;
+
+struct s3c2416_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+/* pseudo-frequency for dvs mode */
+#define FREQ_DVS 132333
+
+/* frequency to sleep and reboot in
+ * it's essential to leave dvs, as some boards do not reconfigure the
+ * regulator on reboot
+ */
+#define FREQ_SLEEP 133333
+
+/* Sources for the ARMCLK */
+#define SOURCE_HCLK 0
+#define SOURCE_ARMDIV 1
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+/* S3C2416 only supports changing the voltage in the dvs-mode.
+ * Voltages down to 1.0V seem to work, so we take what the regulator
+ * can get us.
+ */
+static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
+ [SOURCE_HCLK] = { 950000, 1250000 },
+ [SOURCE_ARMDIV] = { 1250000, 1350000 },
+};
+#endif
+
+static struct cpufreq_frequency_table s3c2416_freq_table[] = {
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133333 },
+ { 0, SOURCE_ARMDIV, 266666 },
+ { 0, SOURCE_ARMDIV, 400000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table s3c2450_freq_table[] = {
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133500 },
+ { 0, SOURCE_ARMDIV, 267000 },
+ { 0, SOURCE_ARMDIV, 534000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+
+static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+
+ if (cpu != 0)
+ return 0;
+
+ /* return our pseudo-frequency when in dvs mode */
+ if (s3c_freq->is_dvs)
+ return FREQ_DVS;
+
+ return clk_get_rate(s3c_freq->armclk) / 1000;
+}
+
+static int s3c2416_cpufreq_set_armdiv(struct s3c2416_data *s3c_freq,
+ unsigned int freq)
+{
+ int ret;
+
+ if (clk_get_rate(s3c_freq->armdiv) / 1000 != freq) {
+ ret = clk_set_rate(s3c_freq->armdiv, freq * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set armdiv rate %dkHz: %d\n",
+ freq, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
+{
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct s3c2416_dvfs *dvfs;
+#endif
+ int ret;
+
+ if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: already in dvs mode, nothing to do\n");
+ return 0;
+ }
+
+ pr_debug("cpufreq: switching armclk to hclk (%lukHz)\n",
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ ret = clk_set_parent(s3c_freq->armclk, s3c_freq->hclk);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to switch armclk to hclk: %d\n", ret);
+ return ret;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ /* changing the core voltage is only allowed when in dvs mode */
+ if (s3c_freq->vddarm) {
+ dvfs = &s3c2416_dvfs_table[idx];
+
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
+ dvfs->vddarm_min, dvfs->vddarm_max);
+ ret = regulator_set_voltage(s3c_freq->vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+
+ /* when lowering the voltage failed, there is nothing to do */
+ if (ret != 0)
+ pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
+ }
+#endif
+
+ s3c_freq->is_dvs = 1;
+
+ return 0;
+}
+
+static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
+{
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct s3c2416_dvfs *dvfs;
+#endif
+ int ret;
+
+ if (!s3c_freq->is_dvs) {
+ pr_debug("cpufreq: not in dvs mode, so can't leave\n");
+ return 0;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ if (s3c_freq->vddarm) {
+ dvfs = &s3c2416_dvfs_table[idx];
+
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
+ dvfs->vddarm_min, dvfs->vddarm_max);
+ ret = regulator_set_voltage(s3c_freq->vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
+ return ret;
+ }
+ }
+#endif
+
+	/* force armdiv to hclk frequency for transition from dvs */
+ if (clk_get_rate(s3c_freq->armdiv) > clk_get_rate(s3c_freq->hclk)) {
+ pr_debug("cpufreq: force armdiv to hclk frequency (%lukHz)\n",
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
+ clk_get_rate(s3c_freq->hclk) / 1000, ret);
+ return ret;
+ }
+ }
+
+ pr_debug("cpufreq: switching armclk parent to armdiv (%lukHz)\n",
+ clk_get_rate(s3c_freq->armdiv) / 1000);
+
+ ret = clk_set_parent(s3c_freq->armclk, s3c_freq->armdiv);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to switch armclk clock parent to armdiv: %d\n",
+ ret);
+ return ret;
+ }
+
+ s3c_freq->is_dvs = 0;
+
+ return 0;
+}
+
+static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ unsigned int new_freq;
+ int idx, ret, to_dvs = 0;
+
+ mutex_lock(&cpufreq_lock);
+
+ idx = s3c_freq->freq_table[index].driver_data;
+
+ if (idx == SOURCE_HCLK)
+ to_dvs = 1;
+
+ /* switching to dvs when it's not allowed */
+ if (to_dvs && s3c_freq->disable_dvs) {
+ pr_debug("cpufreq: entering dvs mode not allowed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* When leaving dvs mode, always switch the armdiv to the hclk rate.
+ * The S3C2416 has stability issues when switching directly to
+ * higher frequencies.
+ */
+ new_freq = (s3c_freq->is_dvs && !to_dvs)
+ ? clk_get_rate(s3c_freq->hclk) / 1000
+ : s3c_freq->freq_table[index].frequency;
+
+ if (to_dvs) {
+ pr_debug("cpufreq: enter dvs\n");
+ ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
+ } else if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: leave dvs\n");
+ ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
+ } else {
+ pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
+ }
+
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *pos;
+ struct s3c2416_dvfs *dvfs;
+
+ count = regulator_count_voltages(s3c_freq->vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ return;
+ }
+
+ if (!count)
+ goto out;
+
+ cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
+ dvfs = &s3c2416_dvfs_table[pos->driver_data];
+ found = 0;
+
+ /* Check only the min-voltage, more is always ok on S3C2416 */
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(s3c_freq->vddarm, i);
+ if (v >= dvfs->vddarm_min)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+out:
+ /* Guessed */
+ s3c_freq->regulator_latency = 1 * 1000 * 1000;
+}
+#endif
+
+static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ int ret;
+ struct cpufreq_policy *policy;
+
+ mutex_lock(&cpufreq_lock);
+
+ /* disable further changes */
+ s3c_freq->disable_dvs = 1;
+
+ mutex_unlock(&cpufreq_lock);
+
+ /* some boards don't reconfigure the regulator on reboot, which
+ * could lead to undervolting the cpu when the clock is reset.
+ * Therefore we always leave the DVS mode on reboot.
+ */
+ if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: leave dvs on reboot\n");
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
+ cpufreq_cpu_put(policy);
+
+ if (ret < 0)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
+ .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
+};
+
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ struct cpufreq_frequency_table *pos;
+ struct clk *msysclk;
+ unsigned long rate;
+ int ret;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ msysclk = clk_get(NULL, "msysclk");
+ if (IS_ERR(msysclk)) {
+ ret = PTR_ERR(msysclk);
+ pr_err("cpufreq: Unable to obtain msysclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * S3C2416 and S3C2450 share the same processor-ID and also provide no
+ * other means to distinguish them other than through the rate of
+ * msysclk. On S3C2416 msysclk runs at 800MHz and on S3C2450 at 533MHz.
+ */
+ rate = clk_get_rate(msysclk);
+ if (rate == 800 * 1000 * 1000) {
+ pr_info("cpufreq: msysclk running at %lukHz, using S3C2416 frequency table\n",
+ rate / 1000);
+ s3c_freq->freq_table = s3c2416_freq_table;
+ policy->cpuinfo.max_freq = 400000;
+ } else if (rate / 1000 == 534000) {
+ pr_info("cpufreq: msysclk running at %lukHz, using S3C2450 frequency table\n",
+ rate / 1000);
+ s3c_freq->freq_table = s3c2450_freq_table;
+ policy->cpuinfo.max_freq = 534000;
+ }
+
+ /* not needed anymore */
+ clk_put(msysclk);
+
+ if (s3c_freq->freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU, msysclk at %lukHz\n",
+ rate / 1000);
+ return -ENODEV;
+ }
+
+ s3c_freq->is_dvs = 0;
+
+ s3c_freq->armdiv = clk_get(NULL, "armdiv");
+ if (IS_ERR(s3c_freq->armdiv)) {
+ ret = PTR_ERR(s3c_freq->armdiv);
+ pr_err("cpufreq: Unable to obtain ARMDIV: %d\n", ret);
+ return ret;
+ }
+
+ s3c_freq->hclk = clk_get(NULL, "hclk");
+ if (IS_ERR(s3c_freq->hclk)) {
+ ret = PTR_ERR(s3c_freq->hclk);
+ pr_err("cpufreq: Unable to obtain HCLK: %d\n", ret);
+ goto err_hclk;
+ }
+
+	/* check hclk rate; we only support the common 133MHz for now.
+	 * hclk could also run at 66MHz, but this is not often used.
+ */
+ rate = clk_get_rate(s3c_freq->hclk);
+ if (rate < 133 * 1000 * 1000) {
+ pr_err("cpufreq: HCLK not at 133MHz\n");
+ ret = -EINVAL;
+ goto err_armclk;
+ }
+
+ s3c_freq->armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(s3c_freq->armclk)) {
+ ret = PTR_ERR(s3c_freq->armclk);
+ pr_err("cpufreq: Unable to obtain ARMCLK: %d\n", ret);
+ goto err_armclk;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ s3c_freq->vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(s3c_freq->vddarm)) {
+ ret = PTR_ERR(s3c_freq->vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ goto err_vddarm;
+ }
+
+ s3c2416_cpufreq_cfg_regulator(s3c_freq);
+#else
+ s3c_freq->regulator_latency = 0;
+#endif
+
+ cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
+ /* special handling for dvs mode */
+ if (pos->driver_data == 0) {
+ if (!s3c_freq->hclk) {
+ pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ } else {
+ continue;
+ }
+ }
+
+ /* Check for frequencies we can generate */
+ rate = clk_round_rate(s3c_freq->armdiv,
+ pos->frequency * 1000);
+ rate /= 1000;
+ if (rate != pos->frequency) {
+ pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
+ pos->frequency, rate);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+	/* Datasheet says PLL stabilisation time must be at least 300us,
+	 * so add some fudge. (reference in LOCKCON0 register description)
+ */
+ cpufreq_generic_init(policy, s3c_freq->freq_table,
+ (500 * 1000) + s3c_freq->regulator_latency);
+ register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
+
+ return 0;
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+err_vddarm:
+ clk_put(s3c_freq->armclk);
+#endif
+err_armclk:
+ clk_put(s3c_freq->hclk);
+err_hclk:
+ clk_put(s3c_freq->armdiv);
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c2416_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c2416_cpufreq_set_target,
+ .get = s3c2416_cpufreq_get_speed,
+ .init = s3c2416_cpufreq_driver_init,
+ .name = "s3c2416",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init s3c2416_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c2416_cpufreq_driver);
+}
+module_init(s3c2416_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
new file mode 100644
index 000000000..2011fb9c0
--- /dev/null
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2006-2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ *
+ * S3C2440/S3C2442 CPU Frequency scaling
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#define S3C2440_CLKDIVN_PDIVN (1<<0)
+#define S3C2440_CLKDIVN_HDIVN_MASK (3<<1)
+#define S3C2440_CLKDIVN_HDIVN_1 (0<<1)
+#define S3C2440_CLKDIVN_HDIVN_2 (1<<1)
+#define S3C2440_CLKDIVN_HDIVN_4_8 (2<<1)
+#define S3C2440_CLKDIVN_HDIVN_3_6 (3<<1)
+#define S3C2440_CLKDIVN_UCLK (1<<3)
+
+#define S3C2440_CAMDIVN_CAMCLK_MASK (0xf<<0)
+#define S3C2440_CAMDIVN_CAMCLK_SEL (1<<4)
+#define S3C2440_CAMDIVN_HCLK3_HALF (1<<8)
+#define S3C2440_CAMDIVN_HCLK4_HALF (1<<9)
+#define S3C2440_CAMDIVN_DVSEN (1<<12)
+
+#define S3C2442_CAMDIVN_CAMCLK_DIV3 (1<<5)
+
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static inline int within_khz(unsigned long a, unsigned long b)
+{
+ long diff = a - b;
+
+ return (diff >= -1000 && diff <= 1000);
+}
+
+/**
+ * s3c2440_cpufreq_calcdivs - calculate divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Calculate the divider values for the given frequency settings
+ * specified in @cfg. The values are stored in @cfg for later use
+ * by the relevant set routine if the requested settings can be reached.
+ */
+static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv;
+ unsigned long hclk, fclk, armclk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
+ __func__, fclk, armclk, hclk_max);
+
+ if (armclk > fclk) {
+ pr_warn("%s: armclk > fclk\n", __func__);
+ armclk = fclk;
+ }
+
+ /* if we are in DVS, we need HCLK to be <= ARMCLK */
+ if (armclk < fclk && armclk < hclk_max)
+ hclk_max = armclk;
+
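+	/* try hclk dividers 1, 2, 3, 4, 6 and 8 (5 and 7 are skipped)
+	 * until hclk fits within hclk_max. */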
+ for (hdiv = 1; hdiv < 9; hdiv++) {
+ if (hdiv == 5 || hdiv == 7)
+ hdiv++;
+
+ hclk = (fclk / hdiv);
+ if (hclk <= hclk_max || within_khz(hclk, hclk_max))
+ break;
+ }
+
+ s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
+
+ if (hdiv > 8)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* calculate a valid armclk */
+
+ if (armclk < hclk)
+ armclk = hclk;
+
+ /* if we're running armclk lower than fclk, this really means
+ * that the system should go into dvs mode, which means that
+ * armclk is connected to hclk. */
+ if (armclk < fclk) {
+ cfg->divs.dvs = 1;
+ armclk = hclk;
+ } else
+ cfg->divs.dvs = 0;
+
+ cfg->freq.armclk = armclk;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv;
+ cfg->divs.p_divisor = pdiv;
+
+ return 0;
+
+ invalid:
+ return -EINVAL;
+}
+
+#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
+ S3C2440_CAMDIVN_HCLK4_HALF)
+
+/**
+ * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Set the divisors from the settings in @cfg, which were generated
+ * during the calculation phase by s3c2440_cpufreq_calcdivs().
+ */
+static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv, camdiv;
+
+ s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
+ cfg->divs.h_divisor, cfg->divs.p_divisor);
+
+ clkdiv = s3c24xx_read_clkdivn();
+ camdiv = s3c2440_read_camdivn();
+
+ clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
+ camdiv &= ~CAMDIVN_HCLK_HALF;
+
+ switch (cfg->divs.h_divisor) {
+ case 1:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
+ break;
+
+ case 2:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
+ break;
+
+ case 6:
+ camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
+ fallthrough;
+ case 3:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
+ break;
+
+ case 8:
+ camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
+ fallthrough;
+ case 4:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
+ break;
+
+ default:
+ BUG(); /* we don't expect to get here. */
+ }
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2440_CLKDIVN_PDIVN;
+
+ /* todo - set pclk. */
+
+ /* Write the divisors first with hclk intentionally halved so that
+ * when we write clkdiv we will under-frequency instead of over. We
+ * then make a short delay and remove the hclk halving if necessary.
+ */
+
+ s3c2440_write_camdivn(camdiv | CAMDIVN_HCLK_HALF);
+ s3c24xx_write_clkdivn(clkdiv);
+
+ ndelay(20);
+ s3c2440_write_camdivn(camdiv);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
+ int *divs,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ unsigned long freq;
+ int index = 0;
+ int div;
+
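+	/* add fclk/div (in kHz) for each divisor; rates above max_hclk
+	 * are skipped unless the clock is undivided. */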
+ for (div = *divs; div > 0; div = *divs++) {
+ freq = fclk / div;
+
+ if (freq > max_hclk && div != 1)
+ continue;
+
+ freq /= 1000; /* table is in kHz */
+ index = s3c_cpufreq_addfreq(table, index, table_size, freq);
+ if (index < 0)
+ break;
+ }
+
+ return index;
+}
+
+static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
+
+static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ int ret;
+
+ WARN_ON(cfg->info == NULL);
+ WARN_ON(cfg->board == NULL);
+
+ ret = run_freq_for(cfg->info->max.hclk,
+ cfg->info->max.fclk,
+ hclk_divs,
+ table, table_size);
+
+ s3c_freq_dbg("%s: returning %d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
+ .max = {
+ .fclk = 400000000,
+ .hclk = 133333333,
+ .pclk = 66666666,
+ },
+
+ .locktime_m = 300,
+ .locktime_u = 300,
+ .locktime_bits = 16,
+
+ .name = "s3c244x",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+ .set_fvco = s3c2410_set_fvco,
+
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2440_cpufreq_setdivs,
+ .calc_divs = s3c2440_cpufreq_calcdivs,
+ .calc_freqtable = s3c2440_cpufreq_calctable,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2440_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ armclk = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
+ pr_err("%s: failed to get clocks\n", __func__);
+ return -ENOENT;
+ }
+
+ return s3c_cpufreq_register(&s3c2440_cpufreq_info);
+}
+
+static struct subsys_interface s3c2440_cpufreq_interface = {
+ .name = "s3c2440_cpufreq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2440_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2440_cpufreq_interface);
+}
+
+/* arch_initcall adds the clocks we need, so use subsys_initcall. */
+subsys_initcall(s3c2440_cpufreq_init);
+
+static struct subsys_interface s3c2442_cpufreq_interface = {
+ .name = "s3c2442_cpufreq",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2442_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2442_cpufreq_interface);
+}
+subsys_initcall(s3c2442_cpufreq_init);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
new file mode 100644
index 000000000..93971dfe7
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling - debugfs status support
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+
+static struct dentry *dbgfs_root;
+static struct dentry *dbgfs_file_io;
+static struct dentry *dbgfs_file_info;
+static struct dentry *dbgfs_file_board;
+
+#define print_ns(x) ((x) / 10), ((x) % 10)
+
+static void show_max(struct seq_file *seq, struct s3c_freq *f)
+{
+ seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
+ f->fclk, f->hclk, f->pclk, f->armclk);
+}
+
+static int board_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_cpufreq_board *brd;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ brd = cfg->board;
+ if (!brd) {
+ seq_printf(seq, "no board definition set?\n");
+ return 0;
+ }
+
+ seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
+ seq_printf(seq, "auto_io=%u\n", brd->auto_io);
+ seq_printf(seq, "need_io=%u\n", brd->need_io);
+
+ show_max(seq, &brd->max);
+
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(board);
+
+static int info_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
+ seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
+ cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
+	seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.pclk);
+ seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
+ seq_printf(seq, "\n");
+
+ show_max(seq, &cfg->max);
+
+ seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
+ cfg->divs.h_divisor, cfg->divs.p_divisor,
+ cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static int io_show(struct seq_file *seq, void *p)
+{
+ void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_iotimings *iot;
+ union s3c_iobank *iob;
+ int bank;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ show_bank = cfg->info->debug_io_show;
+ if (!show_bank) {
+ seq_printf(seq, "no code to show bank timing\n");
+ return 0;
+ }
+
+ iot = s3c_cpufreq_getiotimings();
+ if (!iot) {
+ seq_printf(seq, "no io timings registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
+
+ for (bank = 0; bank < MAX_BANKS; bank++) {
+ iob = &iot->bank[bank];
+
+ seq_printf(seq, "bank %d: ", bank);
+
+ if (!iob->io_2410) {
+ seq_printf(seq, "nothing set\n");
+ continue;
+ }
+
+ show_bank(seq, cfg, iob);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(io);
+
+static int __init s3c_freq_debugfs_init(void)
+{
+ dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
+ if (IS_ERR(dbgfs_root)) {
+ pr_err("%s: error creating debugfs root\n", __func__);
+ return PTR_ERR(dbgfs_root);
+ }
+
+ dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
+ NULL, &io_fops);
+
+ dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
+ NULL, &info_fops);
+
+ dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
+ NULL, &board_fops);
+
+ return 0;
+}
+
+late_initcall(s3c_freq_debugfs_init);
+
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
new file mode 100644
index 000000000..7380c32b2
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+/* note, cpufreq support deals in kHz, not Hz */
+static struct cpufreq_driver s3c24xx_driver;
+static struct s3c_cpufreq_config cpu_cur;
+static struct s3c_iotimings s3c24xx_iotiming;
+static struct cpufreq_frequency_table *pll_reg;
+static unsigned int last_target = ~0;
+static unsigned int ftab_size;
+static struct cpufreq_frequency_table *ftab;
+
+static struct clk *_clk_mpll;
+static struct clk *_clk_xtal;
+static struct clk *clk_fclk;
+static struct clk *clk_hclk;
+static struct clk *clk_pclk;
+static struct clk *clk_arm;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
+struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
+{
+ return &cpu_cur;
+}
+
+struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
+{
+ return &s3c24xx_iotiming;
+}
+#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
+
+static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long fclk, pclk, hclk, armclk;
+
+ cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
+ cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
+ cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
+ cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
+
+ cfg->pll.driver_data = s3c24xx_read_mpllcon();
+ cfg->pll.frequency = fclk;
+
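+	/* hclk period in tenths of a nanosecond, as used by the IO timing code */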
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+
+ cfg->divs.h_divisor = fclk / hclk;
+ cfg->divs.p_divisor = fclk / pclk;
+}
+
+static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long pll = cfg->pll.frequency;
+
+ cfg->freq.fclk = pll;
+ cfg->freq.hclk = pll / cfg->divs.h_divisor;
+ cfg->freq.pclk = pll / cfg->divs.p_divisor;
+
+ /* convert hclk into 10ths of nanoseconds for io calcs */
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+}
+
+static inline int closer(unsigned int target, unsigned int n, unsigned int c)
+{
+ int diff_cur = abs(target - c);
+ int diff_new = abs(target - n);
+
+ return (diff_new < diff_cur);
+}
+
+static void s3c_cpufreq_show(const char *pfx,
+ struct s3c_cpufreq_config *cfg)
+{
+ s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
+ pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->divs.h_divisor,
+ cfg->freq.pclk, cfg->divs.p_divisor);
+}
+
+/* functions to wrap the driver info calls to do the cpu specific work */
+
+static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->set_iotiming)
+ (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
+}
+
+static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->calc_iotiming)
+ return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
+
+ return 0;
+}
+
+static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_refresh)(cfg);
+}
+
+static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_divs)(cfg);
+}
+
+static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ return (cfg->info->calc_divs)(cfg);
+}
+
+static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
+{
+ cfg->mpll = _clk_mpll;
+ (cfg->info->set_fvco)(cfg);
+}
+
+static inline void s3c_cpufreq_updateclk(struct clk *clk,
+ unsigned int freq)
+{
+ clk_set_rate(clk, freq);
+}
+
+static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ struct cpufreq_frequency_table *pll)
+{
+ struct s3c_cpufreq_freqs freqs;
+ struct s3c_cpufreq_config cpu_new;
+ unsigned long flags;
+
+ cpu_new = cpu_cur; /* copy new from current */
+
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ /* TODO - check for DMA currently outstanding */
+
+ cpu_new.pll = pll ? *pll : cpu_cur.pll;
+
+ if (pll)
+ freqs.pll_changing = 1;
+
+ /* update our frequencies */
+
+ cpu_new.freq.armclk = target_freq;
+ cpu_new.freq.fclk = cpu_new.pll.frequency;
+
+ if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
+ pr_err("no divisors for %d\n", target_freq);
+ goto err_notpossible;
+ }
+
+ s3c_freq_dbg("%s: got divs\n", __func__);
+
+ s3c_cpufreq_calc(&cpu_new);
+
+ s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
+
+ if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
+ if (s3c_cpufreq_calcio(&cpu_new) < 0) {
+ pr_err("%s: no IO timings\n", __func__);
+ goto err_notpossible;
+ }
+ }
+
+ s3c_cpufreq_show("new", &cpu_new);
+
+ /* setup our cpufreq parameters */
+
+ freqs.old = cpu_cur.freq;
+ freqs.new = cpu_new.freq;
+
+ freqs.freqs.old = cpu_cur.freq.armclk / 1000;
+ freqs.freqs.new = cpu_new.freq.armclk / 1000;
+
+ /* update f/h/p clock settings before we issue the change
+ * notification, so that drivers do not need to do anything
+ * special if they want to recalculate on CPUFREQ_PRECHANGE. */
+
+ s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
+ s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
+ s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
+ s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
+
+ /* start the frequency change */
+ cpufreq_freq_transition_begin(policy, &freqs.freqs);
+
+ /* If hclk is staying the same, then we do not need to
+ * re-write the IO or the refresh timings whilst we are changing
+ * speed. */
+
+ local_irq_save(flags);
+
+ /* is our memory clock slowing down? */
+ if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
+ if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
+ /* not changing PLL, just set the divisors */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
+ /* slow the cpu down, then set divisors */
+
+ s3c_cpufreq_setfvco(&cpu_new);
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ /* set the divisors, then speed up */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ s3c_cpufreq_setfvco(&cpu_new);
+ }
+ }
+
+ /* did our memory clock speed up */
+ if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
+ /* update our current settings */
+ cpu_cur = cpu_new;
+
+ local_irq_restore(flags);
+
+ /* notify everyone we've done this */
+ cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
+
+ s3c_freq_dbg("%s: finished\n", __func__);
+ return 0;
+
+ err_notpossible:
+ pr_err("no compatible settings for %d\n", target_freq);
+ return -EINVAL;
+}
+
+/* s3c_cpufreq_target
+ *
+ * called by the cpufreq core to adjust the frequency that the CPU
+ * is currently running at.
+ */
+
+static int s3c_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *pll;
+ unsigned int index;
+
+	/* avoid repeated calls which cause a needless amount of duplicated
+ * logging output (and CPU time as the calculation process is
+ * done) */
+ if (target_freq == last_target)
+ return 0;
+
+ last_target = target_freq;
+
+ s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
+ __func__, policy, target_freq, relation);
+
+ if (ftab) {
+ index = cpufreq_frequency_table_target(policy, target_freq,
+ relation);
+
+ s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
+ target_freq, index, ftab[index].frequency);
+ target_freq = ftab[index].frequency;
+ }
+
+ target_freq *= 1000; /* convert target to Hz */
+
+ /* find the settings for our new frequency */
+
+ if (!pll_reg || cpu_cur.lock_pll) {
+ /* either we've not got any PLL values, or we've locked
+ * to the current one. */
+ pll = NULL;
+ } else {
+ struct cpufreq_policy tmp_policy;
+
+ /* we keep the cpu pll table in Hz, to ensure we get an
+ * accurate value for the PLL output. */
+
+ tmp_policy.min = policy->min * 1000;
+ tmp_policy.max = policy->max * 1000;
+ tmp_policy.cpu = policy->cpu;
+ tmp_policy.freq_table = pll_reg;
+
+ /* cpufreq_frequency_table_target returns the index
+ * of the table entry, not the value of
+ * the table entry's index field. */
+
+ index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
+ relation);
+ pll = pll_reg + index;
+
+ s3c_freq_dbg("%s: target %u => %u\n",
+ __func__, target_freq, pll->frequency);
+
+ target_freq = pll->frequency;
+ }
+
+ return s3c_cpufreq_settarget(policy, target_freq, pll);
+}
+
+struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk))
+ pr_err("failed to get clock '%s'\n", name);
+
+ return clk;
+}
+
+static int s3c_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = clk_arm;
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+ policy->freq_table = ftab;
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_initclks(void)
+{
+ _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
+ _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
+ clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
+ IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
+ pr_err("%s: could not get clock(s)\n", __func__);
+ return -ENOENT;
+ }
+
+ pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
+ __func__,
+ clk_get_rate(clk_fclk) / 1000,
+ clk_get_rate(clk_hclk) / 1000,
+ clk_get_rate(clk_pclk) / 1000,
+ clk_get_rate(clk_arm) / 1000);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static struct cpufreq_frequency_table suspend_pll;
+static unsigned int suspend_freq;
+
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ suspend_pll.frequency = clk_get_rate(_clk_mpll);
+ suspend_pll.driver_data = s3c24xx_read_mpllcon();
+ suspend_freq = clk_get_rate(clk_arm);
+
+ return 0;
+}
+
+static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
+
+ last_target = ~0; /* invalidate last_target setting */
+
+ /* whilst we will be called later on, we try and re-set the
+ * cpu frequencies as soon as possible so that we do not end
+ * up resuming devices and then immediately having to re-set
+ * a number of settings once these devices have restarted.
+ *
+ * as a note, it is expected devices are not used until they
+ * have been un-suspended and at that time they should have
+ * used the updated clock settings.
+ */
+
+ ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
+ if (ret) {
+ pr_err("%s: failed to reset pll/freq\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define s3c_cpufreq_resume NULL
+#define s3c_cpufreq_suspend NULL
+#endif
+
+static struct cpufreq_driver s3c24xx_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .target = s3c_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = s3c_cpufreq_init,
+ .suspend = s3c_cpufreq_suspend,
+ .resume = s3c_cpufreq_resume,
+ .name = "s3c24xx",
+};
+
+
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+{
+ if (!info || !info->name) {
+ pr_err("%s: failed to pass valid information\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
+ info->name);
+
+ /* check our driver info has valid data */
+
+ BUG_ON(info->set_refresh == NULL);
+ BUG_ON(info->set_divs == NULL);
+ BUG_ON(info->calc_divs == NULL);
+
+ /* info->set_fvco is optional, depending on whether there
+ * is a need to set the clock code. */
+
+ cpu_cur.info = info;
+
+ /* Note, driver registering should probably update locktime */
+
+ return 0;
+}
+
+int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
+{
+ struct s3c_cpufreq_board *ours;
+
+ if (!board) {
+ pr_info("%s: no board data\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Copy the board information so that each board can declare
+	 * its own copy as initdata. */
+
+ ours = kzalloc(sizeof(*ours), GFP_KERNEL);
+ if (!ours)
+ return -ENOMEM;
+
+ *ours = *board;
+ cpu_cur.board = ours;
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_auto_io(void)
+{
+ int ret;
+
+ if (!cpu_cur.info->get_iotiming) {
+ pr_err("%s: get_iotiming undefined\n", __func__);
+ return -ENOENT;
+ }
+
+ pr_info("%s: working out IO settings\n", __func__);
+
+ ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
+ if (ret)
+ pr_err("%s: failed to get timings\n", __func__);
+
+ return ret;
+}
+
+/* if one or the other is zero, return the other, otherwise return the min */
+#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
+
+/**
+ * s3c_cpufreq_freq_min - find the minimum settings for the given freq.
+ * @dst: The destination structure
+ * @a: One argument.
+ * @b: The other argument.
+ *
+ * Take the minimum of each frequency entry in the two 'struct s3c_freq'
+ * arguments, unless an entry is zero, in which case it is ignored and
+ * the non-zero argument is used instead.
+ */
+static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
+ struct s3c_freq *a, struct s3c_freq *b)
+{
+ dst->fclk = do_min(a->fclk, b->fclk);
+ dst->hclk = do_min(a->hclk, b->hclk);
+ dst->pclk = do_min(a->pclk, b->pclk);
+ dst->armclk = do_min(a->armclk, b->armclk);
+}
+
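+/*
+ * calc_locktime() converts a PLL lock time given in microseconds into
+ * cycles of @freq (in Hz), rounding up.  Illustrative example (not from
+ * the original source): a 12MHz crystal and a 150us lock time give
+ * DIV_ROUND_UP(12000000 * 150, 1000000) = 1800 cycles.
+ */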
+static inline u32 calc_locktime(u32 freq, u32 time_us)
+{
+ u32 result;
+
+ result = freq * time_us;
+ result = DIV_ROUND_UP(result, 1000 * 1000);
+
+ return result;
+}
+
+static void s3c_cpufreq_update_loctkime(void)
+{
+ unsigned int bits = cpu_cur.info->locktime_bits;
+ u32 rate = (u32)clk_get_rate(_clk_xtal);
+ u32 val;
+
+ if (bits == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
+ val |= calc_locktime(rate, cpu_cur.info->locktime_m);
+
+ pr_info("%s: new locktime is 0x%08x\n", __func__, val);
+ s3c24xx_write_locktime(val);
+}
+
+static int s3c_cpufreq_build_freq(void)
+{
+ int size, ret;
+
+ kfree(ftab);
+
+ size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
+ size++;
+
+ ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
+ if (!ftab)
+ return -ENOMEM;
+
+ ftab_size = size;
+
+ ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
+ s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_initcall(void)
+{
+ int ret = 0;
+
+ if (cpu_cur.info && cpu_cur.board) {
+ ret = s3c_cpufreq_initclks();
+ if (ret)
+ goto out;
+
+ /* get current settings */
+ s3c_cpufreq_getcur(&cpu_cur);
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ if (cpu_cur.board->auto_io) {
+ ret = s3c_cpufreq_auto_io();
+ if (ret) {
+ pr_err("%s: failed to get io timing\n",
+ __func__);
+ goto out;
+ }
+ }
+
+ if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
+ pr_err("%s: no IO support registered\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!cpu_cur.info->need_pll)
+ cpu_cur.lock_pll = 1;
+
+ s3c_cpufreq_update_loctkime();
+
+ s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
+ &cpu_cur.info->max);
+
+ if (cpu_cur.info->calc_freqtable)
+ s3c_cpufreq_build_freq();
+
+ ret = cpufreq_register_driver(&s3c24xx_driver);
+ }
+
+ out:
+ return ret;
+}
+
+late_initcall(s3c_cpufreq_initcall);
+
+/**
+ * s3c_plltab_register - register CPU PLL table.
+ * @plls: The list of PLL entries.
+ * @plls_no: The size of the PLL entries @plls.
+ *
+ * Register the given set of PLLs with the system.
+ */
+int s3c_plltab_register(struct cpufreq_frequency_table *plls,
+ unsigned int plls_no)
+{
+ struct cpufreq_frequency_table *vals;
+ unsigned int size;
+
+ size = sizeof(*vals) * (plls_no + 1);
+
+ vals = kzalloc(size, GFP_KERNEL);
+ if (vals) {
+ memcpy(vals, plls, size);
+ pll_reg = vals;
+
+ /* write a terminating entry, we don't store it in the
+ * table that is stored in the kernel */
+ vals += plls_no;
+ vals->frequency = CPUFREQ_TABLE_END;
+
+ pr_info("%d PLL entries\n", plls_no);
+ } else
+ pr_err("no memory for PLL tables\n");
+
+ return vals ? 0 : -ENOMEM;
+}
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
new file mode 100644
index 000000000..c6bdfc308
--- /dev/null
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ */
+
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+static struct regulator *vddarm;
+static unsigned long regulator_latency;
+
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1150000 },
+ [1] = { 1050000, 1150000 },
+ [2] = { 1100000, 1150000 },
+ [3] = { 1200000, 1350000 },
+ [4] = { 1300000, 1350000 },
+};
+
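+/*
+ * Note (added for clarity): the driver_data field of each entry in the
+ * frequency table below indexes s3c64xx_dvfs_table above; for example the
+ * 800000kHz entry uses index 4, so VDDARM is kept between 1.30V and 1.35V
+ * at that frequency.
+ */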
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 0, 66000 },
+ { 0, 0, 100000 },
+ { 0, 0, 133000 },
+ { 0, 1, 200000 },
+ { 0, 1, 222000 },
+ { 0, 1, 266000 },
+ { 0, 2, 333000 },
+ { 0, 2, 400000 },
+ { 0, 2, 532000 },
+ { 0, 2, 533000 },
+ { 0, 3, 667000 },
+ { 0, 4, 800000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+
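+/*
+ * Note (added for clarity): when a VDDARM regulator is available, the
+ * voltage is raised before the clock rate is increased, and only lowered
+ * after the clock rate has been reduced, so the core never runs out of
+ * spec during a transition.
+ */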
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq, new_freq;
+ int ret;
+
+ old_freq = clk_get_rate(policy->clk) / 1000;
+ new_freq = s3c64xx_freq_table[index].frequency;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && new_freq > old_freq) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
+ new_freq, ret);
+ return ret;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
+ if (ret < 0) {
+ pr_err("Failed to set rate %dkHz: %d\n",
+ new_freq, ret);
+ return ret;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && new_freq < old_freq) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
+ new_freq, ret);
+ if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+
+ return ret;
+ }
+ }
+#endif
+
+ pr_debug("Set actual frequency %lukHz\n",
+ clk_get_rate(policy->clk) / 1000);
+
+ return 0;
+}
+
+#ifdef CONFIG_REGULATOR
+static void s3c64xx_cpufreq_config_regulator(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+ if (count < 0) {
+ pr_err("Unable to check supported voltages\n");
+ }
+
+ if (!count)
+ goto out;
+
+ cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
+ dvfs = &s3c64xx_dvfs_table[freq->driver_data];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("%dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+out:
+ /* Guess based on having to do an I2C/SPI write; in future we
+ * will be able to query the regulator performance here. */
+ regulator_latency = 1 * 1000 * 1000;
+}
+#endif
+
+static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk)) {
+ pr_err("Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(policy->clk));
+ return PTR_ERR(policy->clk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ pr_err("Failed to obtain VDDARM: %ld\n", PTR_ERR(vddarm));
+ pr_err("Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_config_regulator();
+ }
+#endif
+
+ cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(policy->clk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency) {
+ pr_debug("%dkHz unsupported by clock\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+	/* Datasheet says PLL stabilisation time (if we were to use
+ * the PLLs, which we don't currently) is ~300us worst case,
+ * but add some fudge.
+ */
+ cpufreq_generic_init(policy, s3c64xx_freq_table,
+ (500 * 1000) + regulator_latency);
+ return 0;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c64xx_cpufreq_set_target,
+ .get = cpufreq_generic_get,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
new file mode 100644
index 000000000..76c888ed8
--- /dev/null
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * CPU frequency scaling for S5PC110/S5PV210
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+
+static void __iomem *clk_base;
+static void __iomem *dmc_base[2];
+
+#define S5P_CLKREG(x) (clk_base + (x))
+
+#define S5P_APLL_LOCK S5P_CLKREG(0x00)
+#define S5P_APLL_CON S5P_CLKREG(0x100)
+#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
+#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
+#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
+#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
+#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
+#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000)
+#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004)
+#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100)
+#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104)
+
+#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100)
+
+/* CLKSRC0 */
+#define S5P_CLKSRC0_MUX200_SHIFT (16)
+#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT)
+#define S5P_CLKSRC0_MUX166_MASK (0x1<<20)
+#define S5P_CLKSRC0_MUX133_MASK (0x1<<24)
+
+/* CLKSRC2 */
+#define S5P_CLKSRC2_G3D_SHIFT (0)
+#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT)
+#define S5P_CLKSRC2_MFC_SHIFT (4)
+#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT)
+
+/* CLKDIV0 */
+#define S5P_CLKDIV0_APLL_SHIFT (0)
+#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
+#define S5P_CLKDIV0_A2M_SHIFT (4)
+#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
+#define S5P_CLKDIV0_HCLK200_SHIFT (8)
+#define S5P_CLKDIV0_HCLK200_MASK (0x7 << S5P_CLKDIV0_HCLK200_SHIFT)
+#define S5P_CLKDIV0_PCLK100_SHIFT (12)
+#define S5P_CLKDIV0_PCLK100_MASK (0x7 << S5P_CLKDIV0_PCLK100_SHIFT)
+#define S5P_CLKDIV0_HCLK166_SHIFT (16)
+#define S5P_CLKDIV0_HCLK166_MASK (0xF << S5P_CLKDIV0_HCLK166_SHIFT)
+#define S5P_CLKDIV0_PCLK83_SHIFT (20)
+#define S5P_CLKDIV0_PCLK83_MASK (0x7 << S5P_CLKDIV0_PCLK83_SHIFT)
+#define S5P_CLKDIV0_HCLK133_SHIFT (24)
+#define S5P_CLKDIV0_HCLK133_MASK (0xF << S5P_CLKDIV0_HCLK133_SHIFT)
+#define S5P_CLKDIV0_PCLK66_SHIFT (28)
+#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
+
+/* CLKDIV2 */
+#define S5P_CLKDIV2_G3D_SHIFT (0)
+#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT)
+#define S5P_CLKDIV2_MFC_SHIFT (4)
+#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT)
+
+/* CLKDIV6 */
+#define S5P_CLKDIV6_ONEDRAM_SHIFT (28)
+#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
+
+static struct clk *dmc0_clk;
+static struct clk *dmc1_clk;
+static DEFINE_MUTEX(set_freq_lock);
+
+/* APLL M,P,S values for 1G/800Mhz */
+#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
+#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
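+/*
+ * Assuming the usual Samsung APLL relation FOUT = MDIV * FIN / (PDIV * 2^(SDIV-1))
+ * and a 24MHz FIN, the values above give 125 * 24 / 3 = 1000MHz and
+ * 100 * 24 / 3 = 800MHz respectively; bit 31 is the PLL enable bit.
+ */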
+
+/* Use 800MHz when entering sleep mode */
+#define SLEEP_FREQ (800 * 1000)
+
+/* Tracks if CPU frequency can be updated anymore */
+static bool no_cpufreq_access;
+
+/*
+ * DRAM configurations to calculate refresh counter for changing
+ * frequency of memory.
+ */
+struct dram_conf {
+ unsigned long freq; /* HZ */
+ unsigned long refresh; /* DRAM refresh counter * 1000 */
+};
+
+/* DRAM configuration (DMC0 and DMC1) */
+static struct dram_conf s5pv210_dram_conf[2];
+
+enum perf_level {
+ L0, L1, L2, L3, L4,
+};
+
+enum s5pv210_mem_type {
+ LPDDR = 0x1,
+ LPDDR2 = 0x2,
+ DDR2 = 0x4,
+};
+
+enum s5pv210_dmc_port {
+ DMC0 = 0,
+ DMC1,
+};
+
+static struct cpufreq_frequency_table s5pv210_freq_table[] = {
+ {0, L0, 1000*1000},
+ {0, L1, 800*1000},
+ {0, L2, 400*1000},
+ {0, L3, 200*1000},
+ {0, L4, 100*1000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+
+struct s5pv210_dvs_conf {
+ int arm_volt; /* uV */
+ int int_volt; /* uV */
+};
+
+static const int arm_volt_max = 1350000;
+static const int int_volt_max = 1250000;
+
+static struct s5pv210_dvs_conf dvs_conf[] = {
+ [L0] = {
+ .arm_volt = 1250000,
+ .int_volt = 1100000,
+ },
+ [L1] = {
+ .arm_volt = 1200000,
+ .int_volt = 1100000,
+ },
+ [L2] = {
+ .arm_volt = 1050000,
+ .int_volt = 1100000,
+ },
+ [L3] = {
+ .arm_volt = 950000,
+ .int_volt = 1100000,
+ },
+ [L4] = {
+ .arm_volt = 950000,
+ .int_volt = 1000000,
+ },
+};
+
+static u32 clkdiv_val[5][11] = {
+ /*
+ * Clock divider value for following
+ * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
+ * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
+ * ONEDRAM, MFC, G3D }
+ */
+
+ /* L0 : [1000/200/100][166/83][133/66][200/200] */
+ {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L1 : [800/200/100][166/83][133/66][200/200] */
+ {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L2 : [400/200/100][166/83][133/66][200/200] */
+ {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L3 : [200/200/100][166/83][133/66][200/200] */
+ {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L4 : [100/100/100][83/83][66/66][100/100] */
+ {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
+};
+
+/*
+ * This function sets the DRAM refresh counter
+ * according to the operating frequency of the DRAM.
+ * ch: DMC port number, 0 or 1
+ * freq: Operating frequency of the DRAM (kHz)
+ */
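+/*
+ * Illustrative example (not from the original source): with DMC1
+ * initialised at 200MHz and a programmed refresh counter of 0x618, a
+ * request for 100000kHz scales the counter to 0x618 * 100/200 = 0x30c,
+ * matching the values quoted in s5pv210_target() below.
+ */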
+static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
+{
+ unsigned long tmp, tmp1;
+ void __iomem *reg = NULL;
+
+ if (ch == DMC0) {
+ reg = (dmc_base[0] + 0x30);
+ } else if (ch == DMC1) {
+ reg = (dmc_base[1] + 0x30);
+ } else {
+ pr_err("Cannot find DMC port\n");
+ return;
+ }
+
+ /* Find current DRAM frequency */
+ tmp = s5pv210_dram_conf[ch].freq;
+
+ tmp /= freq;
+
+ tmp1 = s5pv210_dram_conf[ch].refresh;
+
+ tmp1 /= tmp;
+
+ writel_relaxed(tmp1, reg);
+}
+
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned long reg;
+ unsigned int priv_index;
+ unsigned int pll_changing = 0;
+ unsigned int bus_speed_changing = 0;
+ unsigned int old_freq, new_freq;
+ int arm_volt, int_volt;
+ int ret = 0;
+
+ mutex_lock(&set_freq_lock);
+
+ if (no_cpufreq_access) {
+ pr_err("Denied access to %s as it is disabled temporarily\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ old_freq = policy->cur;
+ new_freq = s5pv210_freq_table[index].frequency;
+
+ /* Finding current running level index */
+ priv_index = cpufreq_table_find_index_h(policy, old_freq, false);
+
+ arm_volt = dvs_conf[index].arm_volt;
+ int_volt = dvs_conf[index].int_volt;
+
+ if (new_freq > old_freq) {
+ ret = regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ if (ret)
+ goto exit;
+
+ ret = regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+ if (ret)
+ goto exit;
+ }
+
+	/* Check if the PLL needs to be changed */
+ if ((index == L0) || (priv_index == L0))
+ pll_changing = 1;
+
+	/* Check if the system bus clock needs to be changed */
+ if ((index == L4) || (priv_index == L4))
+ bus_speed_changing = 1;
+
+ if (bus_speed_changing) {
+ /*
+ * Reconfigure DRAM refresh counter value for minimum
+ * temporary clock while changing divider.
+ * expected clock is 83Mhz : 7.8usec/(1/83Mhz) = 0x287
+ */
+ if (pll_changing)
+ s5pv210_set_refresh(DMC1, 83000);
+ else
+ s5pv210_set_refresh(DMC1, 100000);
+
+ s5pv210_set_refresh(DMC0, 83000);
+ }
+
+ /*
+	 * The APLL has to be changed at this level:
+	 * APLL -> MPLL (for a stable transition) -> APLL.
+	 * The clock API is not ready for some of the clock sources,
+	 * so do not use the clock API in the code below.
+ */
+ if (pll_changing) {
+ /*
+ * 1. Temporary Change divider for MFC and G3D
+ * SCLKA2M(200/1=200)->(200/4=50)Mhz
+ */
+ reg = readl_relaxed(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
+ (3 << S5P_CLKDIV2_MFC_SHIFT);
+ writel_relaxed(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /*
+ * 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX
+ * (200/4=50)->(667/4=166)Mhz
+ */
+ reg = readl_relaxed(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
+ (1 << S5P_CLKSRC2_MFC_SHIFT);
+ writel_relaxed(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+		 * 3. Set the DMC1 refresh count for 133MHz. If (index == L4)
+		 * the refresh counter has already been programmed in the
+		 * code above (0x287 @ 83MHz).
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 133000);
+
+ /* 4. SCLKAPLL -> SCLKMPLL */
+ reg = readl_relaxed(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
+ writel_relaxed(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ }
+
+ /* Change divider */
+ reg = readl_relaxed(S5P_CLK_DIV0);
+
+ reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
+ S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
+ S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
+ S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
+
+ reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
+ (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
+ (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
+ (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
+ (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
+ (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
+ (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
+ (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
+
+ writel_relaxed(reg, S5P_CLK_DIV0);
+
+ do {
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
+ } while (reg & 0xff);
+
+ /* ARM MCS value changed */
+ reg = readl_relaxed(S5P_ARM_MCS_CON);
+ reg &= ~0x3;
+ if (index >= L3)
+ reg |= 0x3;
+ else
+ reg |= 0x1;
+
+ writel_relaxed(reg, S5P_ARM_MCS_CON);
+
+ if (pll_changing) {
+ /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
+ writel_relaxed(0x2cf, S5P_APLL_LOCK);
+
+ /*
+ * 6. Turn on APLL
+ * 6-1. Set PMS values
+ * 6-2. Wait until the PLL is locked
+ */
+ if (index == L0)
+ writel_relaxed(APLL_VAL_1000, S5P_APLL_CON);
+ else
+ writel_relaxed(APLL_VAL_800, S5P_APLL_CON);
+
+ do {
+ reg = readl_relaxed(S5P_APLL_CON);
+ } while (!(reg & (0x1 << 29)));
+
+ /*
+ * 7. Change source clock from SCLKMPLL(667Mhz)
+ * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
+ * (667/4=166)->(200/4=50)Mhz
+ */
+ reg = readl_relaxed(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
+ (0 << S5P_CLKSRC2_MFC_SHIFT);
+ writel_relaxed(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 8. Change divider for MFC and G3D
+ * (200/4=50)->(200/1=200)Mhz
+ */
+ reg = readl_relaxed(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
+ (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
+ writel_relaxed(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /* 9. Change MPLL to APLL in MSYS_MUX */
+ reg = readl_relaxed(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
+ writel_relaxed(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ /*
+ * 10. DMC1 refresh counter
+ * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
+ * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 200000);
+ }
+
+ /*
+ * L4 level needs to change memory bus speed, hence ONEDRAM clock
+ * divider and memory refresh parameter should be changed
+ */
+ if (bus_speed_changing) {
+ reg = readl_relaxed(S5P_CLK_DIV6);
+ reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
+ reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
+ writel_relaxed(reg, S5P_CLK_DIV6);
+
+ do {
+ reg = readl_relaxed(S5P_CLKDIV_STAT1);
+ } while (reg & (1 << 15));
+
+ /* Reconfigure DRAM refresh counter value */
+ if (index != L4) {
+ /*
+ * DMC0 : 166Mhz
+ * DMC1 : 200Mhz
+ */
+ s5pv210_set_refresh(DMC0, 166000);
+ s5pv210_set_refresh(DMC1, 200000);
+ } else {
+ /*
+ * DMC0 : 83Mhz
+ * DMC1 : 100Mhz
+ */
+ s5pv210_set_refresh(DMC0, 83000);
+ s5pv210_set_refresh(DMC1, 100000);
+ }
+ }
+
+ if (new_freq < old_freq) {
+ regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+
+ regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ }
+
+ pr_debug("Perf changed[L%d]\n", index);
+
+exit:
+ mutex_unlock(&set_freq_lock);
+ return ret;
+}
+
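+/*
+ * check_mem_type() returns bits [11:8] of the DMC register at offset 0x4,
+ * which encode the attached memory type as per enum s5pv210_mem_type.
+ */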
+static int check_mem_type(void __iomem *dmc_reg)
+{
+ unsigned long val;
+
+ val = readl_relaxed(dmc_reg + 0x4);
+ val = (val & (0xf << 8));
+
+ return val >> 8;
+}
+
+static int s5pv210_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned long mem_type;
+ int ret;
+
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
+
+ dmc0_clk = clk_get(NULL, "sclk_dmc0");
+ if (IS_ERR(dmc0_clk)) {
+ ret = PTR_ERR(dmc0_clk);
+ goto out_dmc0;
+ }
+
+ dmc1_clk = clk_get(NULL, "hclk_msys");
+ if (IS_ERR(dmc1_clk)) {
+ ret = PTR_ERR(dmc1_clk);
+ goto out_dmc1;
+ }
+
+ if (policy->cpu != 0) {
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+ /*
+	 * check_mem_type: this driver only supports LPDDR & LPDDR2;
+	 * other memory types are not supported.
+ */
+ mem_type = check_mem_type(dmc_base[0]);
+
+ if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
+ pr_err("CPUFreq doesn't support this memory type\n");
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+	/* Find the current refresh counter and frequency for each DMC */
+ s5pv210_dram_conf[0].refresh = (readl_relaxed(dmc_base[0] + 0x30) * 1000);
+ s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
+
+ s5pv210_dram_conf[1].refresh = (readl_relaxed(dmc_base[1] + 0x30) * 1000);
+ s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+
+ policy->suspend_freq = SLEEP_FREQ;
+ cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
+ return 0;
+
+out_dmc1:
+ clk_put(dmc0_clk);
+out_dmc0:
+ clk_put(policy->clk);
+ return ret;
+}
+
+static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int ret;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(0);
+ if (!policy) {
+ pr_debug("cpufreq: get no policy for cpu0\n");
+ return NOTIFY_BAD;
+ }
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ cpufreq_cpu_put(policy);
+
+ if (ret < 0)
+ return NOTIFY_BAD;
+
+ no_cpufreq_access = true;
+ return NOTIFY_DONE;
+}
+
+static struct cpufreq_driver s5pv210_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s5pv210_target,
+ .get = cpufreq_generic_get,
+ .init = s5pv210_cpu_init,
+ .name = "s5pv210",
+ .suspend = cpufreq_generic_suspend,
+ .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
+};
+
+static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
+ .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
+};
+
+static int s5pv210_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ int id, result = 0;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * and DMC controller registers directly and remove static mappings
+ * and dependencies on platform headers. It is necessary to enable
+ * S5PV210 multi-platform support and will be removed together with
+ * this whole driver as soon as S5PV210 gets migrated to use
+ * cpufreq-dt driver.
+ */
+ arm_regulator = regulator_get(NULL, "vddarm");
+ if (IS_ERR(arm_regulator))
+ return dev_err_probe(dev, PTR_ERR(arm_regulator),
+ "failed to get regulator vddarm\n");
+
+ int_regulator = regulator_get(NULL, "vddint");
+ if (IS_ERR(int_regulator)) {
+ result = dev_err_probe(dev, PTR_ERR(int_regulator),
+ "failed to get regulator vddint\n");
+ goto err_int_regulator;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ dev_err(dev, "failed to find clock controller DT node\n");
+ result = -ENODEV;
+ goto err_clock;
+ }
+
+ clk_base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!clk_base) {
+ dev_err(dev, "failed to map clock registers\n");
+ result = -EFAULT;
+ goto err_clock;
+ }
+
+ for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
+ id = of_alias_get_id(np, "dmc");
+ if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
+ dev_err(dev, "failed to get alias of dmc node '%pOFn'\n", np);
+ of_node_put(np);
+ result = id;
+ goto err_clk_base;
+ }
+
+ dmc_base[id] = of_iomap(np, 0);
+ if (!dmc_base[id]) {
+ dev_err(dev, "failed to map dmc%d registers\n", id);
+ of_node_put(np);
+ result = -EFAULT;
+ goto err_dmc;
+ }
+ }
+
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
+ if (!dmc_base[id]) {
+ dev_err(dev, "failed to find dmc%d node\n", id);
+ result = -ENODEV;
+ goto err_dmc;
+ }
+ }
+
+ register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
+
+ return cpufreq_register_driver(&s5pv210_driver);
+
+err_dmc:
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id)
+ if (dmc_base[id]) {
+ iounmap(dmc_base[id]);
+ dmc_base[id] = NULL;
+ }
+
+err_clk_base:
+ iounmap(clk_base);
+
+err_clock:
+ regulator_put(int_regulator);
+
+err_int_regulator:
+ regulator_put(arm_regulator);
+
+ return result;
+}
+
+static struct platform_driver s5pv210_cpufreq_platdrv = {
+ .driver = {
+ .name = "s5pv210-cpufreq",
+ },
+ .probe = s5pv210_cpufreq_probe,
+};
+builtin_platform_driver(s5pv210_cpufreq_platdrv);
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
new file mode 100644
index 000000000..252b9fc26
--- /dev/null
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cpu-sa1100.c: clock scaling for the SA1100
+ *
+ * Copyright (C) 2000 2001, The Delft University of Technology
+ *
+ * Authors:
+ * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
+ * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
+ * - major rewrite for linux-2.3.99
+ * - rewritten for the more generic power management scheme in
+ * linux-2.4.5-rmk1
+ *
+ * This software has been developed while working on the LART
+ * computing board (http://www.lartmaker.nl/), which is
+ * sponsored by the Mobile Multi-media Communications
+ * (http://www.mobimedia.org/) and Ubiquitous Communications
+ * (http://www.ubicom.tudelft.nl/) projects.
+ *
+ * The authors can be reached at:
+ *
+ * Erik Mouw
+ * Information and Communication Theory Group
+ * Faculty of Information Technology and Systems
+ * Delft University of Technology
+ * P.O. Box 5031
+ * 2600 GA Delft
+ * The Netherlands
+ *
+ * Theory of operations
+ * ====================
+ *
+ * Clock scaling can be used to lower the power consumption of the CPU
+ * core. This will give you a somewhat longer running time.
+ *
+ * The SA-1100 has a single register to change the core clock speed:
+ *
+ * PPCR 0x90020014 PLL config
+ *
+ * However, the DRAM timings are closely related to the core clock
+ * speed, so we need to change these, too. The used registers are:
+ *
+ * MDCNFG 0xA0000000 DRAM config
+ * MDCAS0 0xA0000004 Access waveform
+ * MDCAS1 0xA0000008 Access waveform
+ * MDCAS2 0xA000000C Access waveform
+ *
+ * Care must be taken to change the DRAM parameters the correct way,
+ * because otherwise the DRAM becomes unusable and the kernel will
+ * crash.
+ *
+ * The simple solution to avoid a kernel crash is to put the actual
+ * clock change in ROM and jump to that code from the kernel. The main
+ * disadvantage is that the ROM has to be modified, which is not
+ * possible on all SA-1100 platforms. Another disadvantage is that
+ * jumping to ROM makes clock switching unnecessarily complicated.
+ *
+ * The idea behind this driver is that the memory configuration can be
+ * changed while running from DRAM (even with interrupts turned on!)
+ * as long as all re-configuration steps yield a valid DRAM
+ * configuration. The advantages are clear: it will run on all SA-1100
+ * platforms, and the code is very simple.
+ *
+ * If you really want to understand what is going on in
+ * sa1100_update_dram_timings(), you'll have to read sections 8.2,
+ * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
+ * Developers Manual" (available for free from Intel).
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+struct sa1100_dram_regs {
+ int speed;
+ u32 mdcnfg;
+ u32 mdcas0;
+ u32 mdcas1;
+ u32 mdcas2;
+};
+
+
+static struct cpufreq_driver sa1100_driver;
+
+static struct sa1100_dram_regs sa1100_dram_settings[] = {
+ /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
+ { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
+ { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
+ { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
+ {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
+ {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
+ {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
+ {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
+ {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
+ {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
+ {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
+ {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
+ {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
+ {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
+ {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
+ {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
+ {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
+ { 0, 0, 0, 0, 0 } /* last entry */
+};
+
+static void sa1100_update_dram_timings(int current_speed, int new_speed)
+{
+ struct sa1100_dram_regs *settings = sa1100_dram_settings;
+
+ /* find speed */
+ while (settings->speed != 0) {
+ if (new_speed == settings->speed)
+ break;
+
+ settings++;
+ }
+
+ if (settings->speed == 0) {
+ panic("%s: couldn't find dram setting for speed %d\n",
+ __func__, new_speed);
+ }
+
+ /* No risk, no fun: run with interrupts on! */
+ if (new_speed > current_speed) {
+ /* We're going FASTER, so first relax the memory
+ * timings before changing the core frequency
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS2 = settings->mdcas2;
+ MDCAS1 = settings->mdcas1;
+ MDCAS0 = settings->mdcas0;
+ MDCNFG = settings->mdcnfg;
+ } else {
+ /* We're going SLOWER: first decrease the core
+ * frequency and then tighten the memory settings.
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS0 = settings->mdcas0;
+ MDCAS1 = settings->mdcas1;
+ MDCAS2 = settings->mdcas2;
+ MDCNFG = settings->mdcnfg;
+ }
+}
+
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
+{
+ unsigned int cur = sa11x0_getspeed(0);
+ unsigned int new_freq;
+
+ new_freq = sa11x0_freq_table[ppcr].frequency;
+
+ if (new_freq > cur)
+ sa1100_update_dram_timings(cur, new_freq);
+
+ PPCR = ppcr;
+
+ if (new_freq < cur)
+ sa1100_update_dram_timings(cur, new_freq);
+
+ return 0;
+}
+
+static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
+{
+ cpufreq_generic_init(policy, sa11x0_freq_table, 0);
+ return 0;
+}
+
+static struct cpufreq_driver sa1100_driver __refdata = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1100_target,
+ .get = sa11x0_getspeed,
+ .init = sa1100_cpu_init,
+ .name = "sa1100",
+};
+
+static int __init sa1100_dram_init(void)
+{
+ if (cpu_is_sa1100())
+ return cpufreq_register_driver(&sa1100_driver);
+ else
+ return -ENODEV;
+}
+
+arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
new file mode 100644
index 000000000..1a83c8678
--- /dev/null
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mach-sa1100/cpu-sa1110.c
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * Note: there are two errata that apply to the SA1110 here:
+ * 7 - SDRAM auto-power-up failure (rev A0)
+ * 13 - Corruption of internal register reads/writes following
+ * SDRAM reads (rev A0, B0, B1)
+ *
+ * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
+ *
+ * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+#undef DEBUG
+
+struct sdram_params {
+ const char name[20];
+ u_char rows; /* bits */
+ u_char cas_latency; /* cycles */
+ u_char tck; /* clock cycle time (ns) */
+ u_char trcd; /* activate to r/w (ns) */
+ u_char trp; /* precharge to activate (ns) */
+ u_char twr; /* write recovery time (ns) */
+ u_short refresh; /* refresh time for array (us) */
+};
+
+struct sdram_info {
+ u_int mdcnfg;
+ u_int mdrefr;
+ u_int mdcas[3];
+};
+
+static struct sdram_params sdram_tbl[] __initdata = {
+ { /* Toshiba TC59SM716 CL2 */
+ .name = "TC59SM716-CL2",
+ .rows = 12,
+ .tck = 10,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 2,
+ }, { /* Toshiba TC59SM716 CL3 */
+ .name = "TC59SM716-CL3",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S641632D TC75 */
+ .name = "K4S641632D",
+ .rows = 14,
+ .tck = 9,
+ .trcd = 27,
+ .trp = 20,
+ .twr = 9,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S281632B-1H */
+ .name = "K4S281632B-1H",
+ .rows = 12,
+ .tck = 10,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung KM416S4030CT */
+ .name = "KM416S4030CT",
+ .rows = 13,
+ .tck = 8,
+ .trcd = 24, /* 3 CLKs */
+ .trp = 24, /* 3 CLKs */
+ .twr = 16, /* Trdl: 2 CLKs */
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Winbond W982516AH75L CL3 */
+ .name = "W982516AH75L",
+ .rows = 16,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Micron MT48LC8M16A2TG-75 */
+ .name = "MT48LC8M16A2TG-75",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ },
+};
+
+static struct sdram_params sdram_params;
+
+/*
+ * Given a period in ns and frequency in khz, calculate the number of
+ * cycles of frequency in period. Note that we round up to the next
+ * cycle, even if we are only slightly over.
+ */
+static inline u_int ns_to_cycles(u_int ns, u_int khz)
+{
+ return (ns * khz + 999999) / 1000000;
+}
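+/*
+ * Illustrative example for ns_to_cycles() above: a 20ns tRP at a
+ * 103200kHz memory clock is (20 * 103200 + 999999) / 1000000 = 3 cycles,
+ * i.e. 2.064 cycles rounded up to 3.
+ */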
+
+/*
+ * Create the MDCAS register bit pattern.
+ */
+static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
+{
+ u_int shift;
+
+ rcd = 2 * rcd - 1;
+ shift = delayed + 1 + rcd;
+
+ mdcas[0] = (1 << rcd) - 1;
+ mdcas[0] |= 0x55555555 << shift;
+ mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
+}
+
+static void
+sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
+ struct sdram_params *sdram)
+{
+ u_int mem_khz, sd_khz, trp, twr;
+
+ mem_khz = cpu_khz / 2;
+ sd_khz = mem_khz;
+
+ /*
+ * If SDCLK would invalidate the SDRAM timings,
+ * run SDCLK at half speed.
+ *
+ * CPU steppings prior to B2 must either run the memory at
+ * half speed or use delayed read latching (errata 13).
+ */
+ if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
+ (read_cpuid_revision() < ARM_CPU_REV_SA1110_B2 && sd_khz < 62000))
+ sd_khz /= 2;
+
+ sd->mdcnfg = MDCNFG & 0x007f007f;
+
+ twr = ns_to_cycles(sdram->twr, mem_khz);
+
+ /* trp should always be >1 */
+ trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
+ if (trp < 1)
+ trp = 1;
+
+ sd->mdcnfg |= trp << 8;
+ sd->mdcnfg |= trp << 24;
+ sd->mdcnfg |= sdram->cas_latency << 12;
+ sd->mdcnfg |= sdram->cas_latency << 28;
+ sd->mdcnfg |= twr << 14;
+ sd->mdcnfg |= twr << 30;
+
+ sd->mdrefr = MDREFR & 0xffbffff0;
+ sd->mdrefr |= 7;
+
+ if (sd_khz != mem_khz)
+ sd->mdrefr |= MDREFR_K1DB2;
+
+ /* initial number of '1's in MDCAS + 1 */
+ set_mdcas(sd->mdcas, sd_khz >= 62000,
+ ns_to_cycles(sdram->trcd, mem_khz));
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
+ sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
+ sd->mdcas[2]);
+#endif
+}
+
+/*
+ * Set the SDRAM refresh rate.
+ */
+static inline void sdram_set_refresh(u_int dri)
+{
+ MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
+ (void) MDREFR;
+}
+
+/*
+ * Update the refresh period. We do this such that we always refresh
+ * the SDRAMs within their permissible period. The refresh period is
+ * always a multiple of the memory clock (fixed at cpu_clock / 2).
+ *
+ * FIXME: we don't currently take account of burst accesses here,
+ * but neither do Intel's DM nor Angel.
+ */
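+/*
+ * Illustrative example (assumed figures): a part with refresh = 64000us
+ * and rows = 12 must refresh a row every 64ms / 4096 = ~15.6us; at a
+ * 206400kHz core clock the memory clock is 103200kHz, which gives about
+ * 1613 cycles per row and hence dri = 1613 / 32 = 50.
+ */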
+static void
+sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
+{
+ u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
+ u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
+
+#ifdef DEBUG
+ mdelay(250);
+ printk(KERN_DEBUG "new dri value = %d\n", dri);
+#endif
+
+ sdram_set_refresh(dri);
+}
+
+/*
+ * Ok, set the CPU frequency.
+ */
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
+{
+ struct sdram_params *sdram = &sdram_params;
+ struct sdram_info sd;
+ unsigned long flags;
+ unsigned int unused;
+
+ sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
+
+#if 0
+ /*
+ * These values are wrong according to the SA1110 documentation
+ * and errata, but they seem to work. Need to get a storage
+ * scope on to the SDRAM signals to work out why.
+ */
+ if (policy->max < 147500) {
+ sd.mdrefr |= MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa7f;
+ } else {
+ sd.mdrefr &= ~MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa9f;
+ }
+ sd.mdcas[1] = 0xaaaaaaaa;
+ sd.mdcas[2] = 0xaaaaaaaa;
+#endif
+
+ /*
+ * The clock could be going away for some time. Set the SDRAMs
+ * to refresh rapidly (every 64 memory clock cycles). To get
+ * through the whole array, we need to wait 262144 mclk cycles.
+ * We wait 20ms to be safe.
+ */
+ sdram_set_refresh(2);
+ if (!irqs_disabled())
+ msleep(20);
+ else
+ mdelay(20);
+
+ /*
+ * Reprogram the DRAM timings with interrupts disabled, and
+ * ensure that we are doing this within a complete cache line.
+ * This means that we won't access SDRAM for the duration of
+ * the programming.
+ */
+ local_irq_save(flags);
+ asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
+ udelay(10);
+ __asm__ __volatile__("\n\
+ b 2f \n\
+ .align 5 \n\
+1: str %3, [%1, #0] @ MDCNFG \n\
+ str %4, [%1, #28] @ MDREFR \n\
+ str %5, [%1, #4] @ MDCAS0 \n\
+ str %6, [%1, #8] @ MDCAS1 \n\
+ str %7, [%1, #12] @ MDCAS2 \n\
+ str %8, [%2, #0] @ PPCR \n\
+ ldr %0, [%1, #0] \n\
+ b 3f \n\
+2: b 1b \n\
+3: nop \n\
+ nop"
+ : "=&r" (unused)
+ : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
+ "r" (sd.mdrefr), "r" (sd.mdcas[0]),
+ "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
+ local_irq_restore(flags);
+
+ /*
+ * Now, return the SDRAM refresh back to normal.
+ */
+ sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
+
+ return 0;
+}
+
+static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
+{
+ cpufreq_generic_init(policy, sa11x0_freq_table, 0);
+ return 0;
+}
+
+/* sa1110_driver needs __refdata because it must remain after init registers
+ * it with cpufreq_register_driver() */
+static struct cpufreq_driver sa1110_driver __refdata = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1110_target,
+ .get = sa11x0_getspeed,
+ .init = sa1110_cpu_init,
+ .name = "sa1110",
+};
+
+static struct sdram_params *sa1110_find_sdram(const char *name)
+{
+ struct sdram_params *sdram;
+
+ for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
+ sdram++)
+ if (strcmp(name, sdram->name) == 0)
+ return sdram;
+
+ return NULL;
+}
+
+static char sdram_name[16];
+
+static int __init sa1110_clk_init(void)
+{
+ struct sdram_params *sdram;
+ const char *name = sdram_name;
+
+ if (!cpu_is_sa1110())
+ return -ENODEV;
+
+ if (!name[0]) {
+ if (machine_is_assabet())
+ name = "TC59SM716-CL3";
+ if (machine_is_pt_system3())
+ name = "K4S641632D";
+ if (machine_is_h3100())
+ name = "KM416S4030CT";
+ if (machine_is_jornada720() || machine_is_h3600())
+ name = "K4S281632B-1H";
+ if (machine_is_nanoengine())
+ name = "MT48LC8M16A2TG-75";
+ }
+
+ sdram = sa1110_find_sdram(name);
+ if (sdram) {
+ printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
+ " twr: %d refresh: %d cas_latency: %d\n",
+ sdram->tck, sdram->trcd, sdram->trp,
+ sdram->twr, sdram->refresh, sdram->cas_latency);
+
+ memcpy(&sdram_params, sdram, sizeof(sdram_params));
+
+ return cpufreq_register_driver(&sa1110_driver);
+ }
+
+ return 0;
+}
+
+module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
+arch_initcall(sa1110_clk_init);
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
new file mode 100644
index 000000000..330c8d6cf
--- /dev/null
+++ b/drivers/cpufreq/sc520_freq.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * sc520_freq.c: cpufreq driver for the AMD Elan sc520
+ *
+ * Copyright (C) 2005 Sean Young <sean@mess.org>
+ *
+ * Based on elanfreq.c
+ *
+ * 2005-03-30: - initial revision
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
+#define MMCR_BASE 0xfffef000 /* The default base address */
+#define OFFS_CPUCTL 0x2 /* CPU Control Register */
+
+static __u8 __iomem *cpuctl;
+
+static struct cpufreq_frequency_table sc520_freq_table[] = {
+ {0, 0x01, 100000},
+ {0, 0x02, 133000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
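+/*
+ * Note (added for clarity): the driver_data value above is the two-bit
+ * clock-speed field that sc520_freq_target() writes into the low bits of
+ * the MMCR CPUCTL register.
+ */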
+
+static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
+{
+ u8 clockspeed_reg = *cpuctl;
+
+ switch (clockspeed_reg & 0x03) {
+ default:
+ pr_err("error: cpuctl register has unexpected value %02x\n",
+ clockspeed_reg);
+ fallthrough;
+ case 0x01:
+ return 100000;
+ case 0x02:
+ return 133000;
+ }
+}
+
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
+{
+
+ u8 clockspeed_reg;
+
+ local_irq_disable();
+
+ clockspeed_reg = *cpuctl & ~0x03;
+ *cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
+
+ local_irq_enable();
+
+ return 0;
+}
+
+/*
+ * Module init and exit code
+ */
+
+static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /* capability check */
+ if (c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 4 || c->x86_model != 9)
+ return -ENODEV;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+ policy->freq_table = sc520_freq_table;
+
+ return 0;
+}
+
+
+static struct cpufreq_driver sc520_freq_driver = {
+ .get = sc520_freq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sc520_freq_target,
+ .init = sc520_freq_cpu_init,
+ .name = "sc520_freq",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id sc520_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 9, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, sc520_ids);
+
+static int __init sc520_freq_init(void)
+{
+ int err;
+
+ if (!x86_match_cpu(sc520_ids))
+ return -ENODEV;
+
+ cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
+ if (!cpuctl) {
+ pr_err("sc520_freq: error: failed to remap memory\n");
+ return -ENOMEM;
+ }
+
+ err = cpufreq_register_driver(&sc520_freq_driver);
+ if (err)
+ iounmap(cpuctl);
+
+ return err;
+}
+
+
+static void __exit sc520_freq_exit(void)
+{
+ cpufreq_unregister_driver(&sc520_freq_driver);
+ iounmap(cpuctl);
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
+
+module_init(sc520_freq_init);
+module_exit(sc520_freq_exit);
+
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
new file mode 100644
index 000000000..028df8a5f
--- /dev/null
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Power Interface (SCMI) based CPUFreq Interface driver
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/energy_model.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+struct scmi_data {
+ int domain_id;
+ int nr_opp;
+ struct device *cpu_dev;
+ cpumask_var_t opp_shared_cpus;
+};
+
+static struct scmi_protocol_handle *ph;
+static const struct scmi_perf_proto_ops *perf_ops;
+
+static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scmi_data *priv = policy->driver_data;
+ unsigned long rate;
+ int ret;
+
+ ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
+ if (ret)
+ return 0;
+ return rate / 1000;
+}
+
+/*
+ * perf_ops->freq_set is not synchronous: the actual OPP change happens
+ * asynchronously, and completion can be notified if the relevant events
+ * are subscribed to with the SCMI firmware.
+ */
+static int
+scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scmi_data *priv = policy->driver_data;
+ u64 freq = policy->freq_table[index].frequency;
+
+ return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
+}
+
+static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct scmi_data *priv = policy->driver_data;
+
+ if (!perf_ops->freq_set(ph, priv->domain_id,
+ target_freq * 1000, true))
+ return target_freq;
+
+ return 0;
+}
+
+static int
+scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+{
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = perf_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = perf_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused
+scmi_get_cpu_power(struct device *cpu_dev, unsigned long *power,
+ unsigned long *KHz)
+{
+ enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
+ unsigned long Hz;
+ int ret, domain;
+
+ domain = perf_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ /* Get the power cost of the performance domain. */
+ Hz = *KHz * 1000;
+ ret = perf_ops->est_power_get(ph, domain, &Hz, power);
+ if (ret)
+ return ret;
+
+ /* Convert the power to uW if it is mW (ignore bogoW) */
+ if (power_scale == SCMI_POWER_MILLIWATTS)
+ *power *= MICROWATT_PER_MILLIWATT;
+
+ /* The EM framework specifies the frequency in KHz. */
+ *KHz = Hz / 1000;
+
+ return 0;
+}
+
+static int scmi_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret, nr_opp;
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scmi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ /* Obtain CPUs that share SCMI performance controls */
+ ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ goto out_free_cpumask;
+ }
+
+ /*
+ * Obtain CPUs that share performance levels.
+ * The OPP 'sharing cpus' info may come from DT through an empty opp
+ * table and opp-shared.
+ */
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret || cpumask_empty(priv->opp_shared_cpus)) {
+ /*
+ * Either opp-table is not set or no opp-shared was found.
+ * Use the CPU mask from SCMI to designate CPUs sharing an OPP
+ * table.
+ */
+ cpumask_copy(priv->opp_shared_cpus, policy->cpus);
+ }
+
+ /*
+ * A previous CPU may have marked OPPs as shared for a few CPUs, based on
+ * what OPP core provided. If the current CPU is part of those few, then
+ * there is no need to add OPPs again.
+ */
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0) {
+ ret = perf_ops->device_opps_add(ph, cpu_dev);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opps to the device\n");
+ goto out_free_cpumask;
+ }
+
+ nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+ if (nr_opp <= 0) {
+ dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
+ __func__, nr_opp);
+
+ ret = -ENODEV;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+
+ goto out_free_opp;
+ }
+
+ priv->nr_opp = nr_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_opp;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->domain_id = perf_ops->device_domain_id(cpu_dev);
+
+ policy->driver_data = priv;
+ policy->freq_table = freq_table;
+
+ /* SCMI allows DVFS request for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = perf_ops->transition_latency_get(ph, cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible =
+ perf_ops->fast_switch_possible(ph, cpu_dev);
+
+ return 0;
+
+out_free_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+out_free_cpumask:
+ free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+ kfree(priv);
+
+ return ret;
+}
+
+static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scmi_data *priv = policy->driver_data;
+
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ free_cpumask_var(priv->opp_shared_cpus);
+ kfree(priv);
+
+ return 0;
+}
+
+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+ enum scmi_power_scale power_scale = perf_ops->power_scale_get(ph);
+ struct scmi_data *priv = policy->driver_data;
+ bool em_power_scale = false;
+
+ /*
+ * This callback will be called for each policy, but we don't need to
+ * register with EM every time. Despite not being part of the same
+ * policy, some CPUs may still share their perf-domains, and a CPU from
+ * another policy may already have registered with EM on behalf of CPUs
+ * of this policy.
+ */
+ if (!priv->nr_opp)
+ return;
+
+ if (power_scale == SCMI_POWER_MILLIWATTS
+ || power_scale == SCMI_POWER_MICROWATTS)
+ em_power_scale = true;
+
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+ &em_cb, priv->opp_shared_cpus,
+ em_power_scale);
+}
+
+static struct cpufreq_driver scmi_cpufreq_driver = {
+ .name = "scmi",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .target_index = scmi_cpufreq_set_target,
+ .fast_switch = scmi_cpufreq_fast_switch,
+ .get = scmi_cpufreq_get_rate,
+ .init = scmi_cpufreq_init,
+ .exit = scmi_cpufreq_exit,
+ .register_em = scmi_cpufreq_register_em,
+};
+
+static int scmi_cpufreq_probe(struct scmi_device *sdev)
+{
+ int ret;
+ struct device *dev = &sdev->dev;
+ const struct scmi_handle *handle;
+
+ handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
+ if (IS_ERR(perf_ops))
+ return PTR_ERR(perf_ops);
+
+#ifdef CONFIG_COMMON_CLK
+ /* dummy clock provider as needed by OPP if clocks property is used */
+ if (of_property_present(dev->of_node, "#clock-cells")) {
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__);
+ }
+#endif
+
+ ret = cpufreq_register_driver(&scmi_cpufreq_driver);
+ if (ret) {
+ dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static void scmi_cpufreq_remove(struct scmi_device *sdev)
+{
+ cpufreq_unregister_driver(&scmi_cpufreq_driver);
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_PERF, "cpufreq" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_cpufreq_drv = {
+ .name = "scmi-cpufreq",
+ .probe = scmi_cpufreq_probe,
+ .remove = scmi_cpufreq_remove,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_cpufreq_drv);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
new file mode 100644
index 000000000..fd2c16821
--- /dev/null
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Control and Power Interface (SCPI) based CPUFreq Interface driver
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+};
+
+static struct scpi_ops *scpi_ops;
+
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ struct scpi_data *priv = policy->driver_data;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+
+ if (ret)
+ return ret;
+
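+	/* Make sure the requested rate actually took effect. */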
+ if (clk_get_rate(priv->clk) != rate)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+{
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
+}
+
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = scpi_ops->add_opps_to_device(cpu_dev);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to add opps to the device\n");
+ return ret;
+ }
+
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+ ret = PTR_ERR(priv->clk);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+ policy->freq_table = freq_table;
+
+ /* scpi allows DVFS request for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+
+ return 0;
+
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+
+ return ret;
+}
+
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .target_index = scpi_cpufreq_set_target,
+ .register_em = cpufreq_register_em_with_opp,
+};
+
+static int scpi_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ scpi_ops = get_scpi_ops();
+ if (!scpi_ops)
+ return -EIO;
+
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int scpi_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
+ scpi_ops = NULL;
+ return 0;
+}
+
+static struct platform_driver scpi_cpufreq_platdrv = {
+ .driver = {
+ .name = "scpi-cpufreq",
+ },
+ .probe = scpi_cpufreq_probe,
+ .remove = scpi_cpufreq_remove,
+};
+module_platform_driver(scpi_cpufreq_platdrv);
+
+MODULE_ALIAS("platform:scpi-cpufreq");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
new file mode 100644
index 000000000..b8704232c
--- /dev/null
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -0,0 +1,175 @@
+/*
+ * cpufreq driver for the SuperH processors.
+ *
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown
+ *
+ * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
+ *
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/clk.h>
+#include <linux/percpu.h>
+#include <linux/sh_clk.h>
+
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+
+struct cpufreq_target {
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+};
+
+static unsigned int sh_cpufreq_get(unsigned int cpu)
+{
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+}
+
+static long __sh_cpufreq_target(void *arg)
+{
+ struct cpufreq_target *target = arg;
+ struct cpufreq_policy *policy = target->policy;
+ int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_freqs freqs;
+ struct device *dev;
+ long freq;
+
+ if (smp_processor_id() != cpu)
+ return -ENODEV;
+
+ dev = get_cpu_device(cpu);
+
+ /* Convert target_freq from kHz to Hz */
+ freq = clk_round_rate(cpuclk, target->freq * 1000);
+
+ if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ return -EINVAL;
+
+ dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
+
+ freqs.old = sh_cpufreq_get(cpu);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(target->policy, &freqs);
+ clk_set_rate(cpuclk, freq);
+ cpufreq_freq_transition_end(target->policy, &freqs, 0);
+
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+ return 0;
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_target data = { .policy = policy, .freq = target_freq };
+
+ return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
+}
+
+static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
+{
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ cpufreq_verify_within_cpu_limits(policy);
+
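+	/*
+	 * No frequency table: bound the request by the rates the clock can
+	 * actually round to, then clamp it again.
+	 */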
+ policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_frequency_table *freq_table;
+ struct device *dev;
+
+ dev = get_cpu_device(cpu);
+
+ cpuclk = clk_get(dev, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ dev_err(dev, "couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table) {
+ policy->freq_table = freq_table;
+ } else {
+		dev_notice(dev, "no frequency table found, falling back to rate rounding.\n");
+
+ policy->min = policy->cpuinfo.min_freq =
+ (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = policy->cpuinfo.max_freq =
+ (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ }
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+
+ clk_put(cpuclk);
+
+ return 0;
+}
+
+static struct cpufreq_driver sh_cpufreq_driver = {
+ .name = "sh",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .get = sh_cpufreq_get,
+ .target = sh_cpufreq_target,
+ .verify = sh_cpufreq_verify,
+ .init = sh_cpufreq_cpu_init,
+ .exit = sh_cpufreq_cpu_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init sh_cpufreq_module_init(void)
+{
+ pr_notice("SuperH CPU frequency driver.\n");
+ return cpufreq_register_driver(&sh_cpufreq_driver);
+}
+
+static void __exit sh_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&sh_cpufreq_driver);
+}
+
+module_init(sh_cpufreq_module_init);
+module_exit(sh_cpufreq_module_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("cpufreq driver for SuperH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
new file mode 100644
index 000000000..92acbb25a
--- /dev/null
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/asi.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us2e_driver;
+
+struct us2e_freq_percpu_info {
+ struct cpufreq_frequency_table table[6];
+};
+
+/* Indexed by cpu number. */
+static struct us2e_freq_percpu_info *us2e_freq_table;
+
+#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
+#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
+
+/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
+ * in the ESTAR mode control register.
+ */
+#define ESTAR_MODE_DIV_1 0x0000000000000000UL
+#define ESTAR_MODE_DIV_2 0x0000000000000001UL
+#define ESTAR_MODE_DIV_4 0x0000000000000003UL
+#define ESTAR_MODE_DIV_6 0x0000000000000002UL
+#define ESTAR_MODE_DIV_8 0x0000000000000004UL
+#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
+
+#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
+#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
+#define MCTRL0_REFR_COUNT_SHIFT 8
+#define MCTRL0_REFR_INTERVAL 7800
+#define MCTRL0_REFR_CLKS_P_CNT 64
+
+static unsigned long read_hbreg(unsigned long addr)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=&r" (ret)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ return ret;
+}
+
+static void write_hbreg(unsigned long addr, unsigned long val)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
+ : "memory");
+ if (addr == HBIRD_ESTAR_MODE_ADDR) {
+ /* Need to wait 16 clock cycles for the PLL to lock. */
+ udelay(1);
+ }
+}
+
+static void self_refresh_ctl(int enable)
+{
+ unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (enable)
+ mctrl |= MCTRL0_SREFRESH_ENAB;
+ else
+ mctrl &= ~MCTRL0_SREFRESH_ENAB;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+}
+
+static void frob_mem_refresh(int cpu_slowing_down,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ unsigned long old_refr_count, refr_count, mctrl;
+
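+	/*
+	 * Rescale the refresh count so DRAM is still refreshed every
+	 * MCTRL0_REFR_INTERVAL ns: clock_tick is in Hz and each count
+	 * spans MCTRL0_REFR_CLKS_P_CNT cycles of the divided clock.
+	 */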
+ refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
+ refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
+
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+ old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
+ >> MCTRL0_REFR_COUNT_SHIFT;
+
+ mctrl &= ~MCTRL0_REFR_COUNT_MASK;
+ mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
+ unsigned long usecs;
+
+ /* We have to wait for both refresh counts (old
+ * and new) to go to zero.
+ */
+ usecs = (MCTRL0_REFR_CLKS_P_CNT *
+ (refr_count + old_refr_count) *
+ 1000000UL *
+ old_divisor) / clock_tick;
+ udelay(usecs + 1UL);
+ }
+}
+
+static void us2e_transition(unsigned long estar, unsigned long new_bits,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ estar &= ~ESTAR_MODE_DIV_MASK;
+
+ /* This is based upon the state transition diagram in the IIe manual. */
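+	/* Moves between divisor 1 and divisors above 2 must pass through divisor 2. */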
+ if (old_divisor == 2 && divisor == 1) {
+ self_refresh_ctl(0);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ } else if (old_divisor == 1 && divisor == 2) {
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ self_refresh_ctl(1);
+ } else if (old_divisor == 1 && divisor > 2) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ 1, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor > 2 && divisor == 1) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ old_divisor, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor < divisor) {
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ } else if (old_divisor > divisor) {
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ } else {
+ BUG();
+ }
+}
+
+static unsigned long index_to_estar_mode(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return ESTAR_MODE_DIV_1;
+
+ case 1:
+ return ESTAR_MODE_DIV_2;
+
+ case 2:
+ return ESTAR_MODE_DIV_4;
+
+ case 3:
+ return ESTAR_MODE_DIV_6;
+
+ case 4:
+ return ESTAR_MODE_DIV_8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long index_to_divisor(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return 1;
+
+ case 1:
+ return 2;
+
+ case 2:
+ return 4;
+
+ case 3:
+ return 6;
+
+ case 4:
+ return 8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long estar_to_divisor(unsigned long estar)
+{
+ unsigned long ret;
+
+ switch (estar & ESTAR_MODE_DIV_MASK) {
+ case ESTAR_MODE_DIV_1:
+ ret = 1;
+ break;
+ case ESTAR_MODE_DIV_2:
+ ret = 2;
+ break;
+ case ESTAR_MODE_DIV_4:
+ ret = 4;
+ break;
+ case ESTAR_MODE_DIV_6:
+ ret = 6;
+ break;
+ case ESTAR_MODE_DIV_8:
+ ret = 8;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static void __us2e_freq_get(void *arg)
+{
+ unsigned long *estar = arg;
+
+ *estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+}
+
+static unsigned int us2e_freq_get(unsigned int cpu)
+{
+ unsigned long clock_tick, estar;
+
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
+ return 0;
+
+ return clock_tick / estar_to_divisor(estar);
+}
+
+static void __us2e_freq_target(void *arg)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int *index = arg;
+ unsigned long new_bits, new_freq;
+ unsigned long clock_tick, divisor, old_divisor, estar;
+
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ new_bits = index_to_estar_mode(*index);
+ divisor = index_to_divisor(*index);
+ new_freq /= divisor;
+
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ old_divisor = estar_to_divisor(estar);
+
+ if (old_divisor != divisor) {
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
+ }
+}
+
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+
+ return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
+}
+
+static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us2e_freq_table[cpu].table[0];
+
+ table[0].driver_data = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].driver_data = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].driver_data = 2;
+ table[2].frequency = clock_tick / 4;
+	table[3].driver_data = 3;
+	table[3].frequency = clock_tick / 6;
+	table[4].driver_data = 4;
+	table[4].frequency = clock_tick / 8;
+	table[5].driver_data = 5;
+	table[5].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us2e_driver)
+ us2e_freq_target(policy, 0);
+
+ return 0;
+}
+
+static int __init us2e_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != spitfire)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == 0x17 && impl == 0x13) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
+ GFP_KERNEL);
+ if (!us2e_freq_table)
+ goto err_out;
+
+ driver->init = us2e_freq_cpu_init;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us2e_freq_target;
+ driver->get = us2e_freq_get;
+ driver->exit = us2e_freq_cpu_exit;
+ strcpy(driver->name, "UltraSPARC-IIe");
+
+ cpufreq_us2e_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us2e_driver = NULL;
+ }
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us2e_freq_exit(void)
+{
+ if (cpufreq_us2e_driver) {
+ cpufreq_unregister_driver(cpufreq_us2e_driver);
+ kfree(cpufreq_us2e_driver);
+ cpufreq_us2e_driver = NULL;
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
+MODULE_LICENSE("GPL");
+
+module_init(us2e_freq_init);
+module_exit(us2e_freq_exit);
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
new file mode 100644
index 000000000..e41b35b16
--- /dev/null
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/head.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us3_driver;
+
+struct us3_freq_percpu_info {
+ struct cpufreq_frequency_table table[4];
+};
+
+/* Indexed by cpu number. */
+static struct us3_freq_percpu_info *us3_freq_table;
+
+/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
+ * in the Safari config register.
+ */
+#define SAFARI_CFG_DIV_1 0x0000000000000000UL
+#define SAFARI_CFG_DIV_2 0x0000000040000000UL
+#define SAFARI_CFG_DIV_32 0x0000000080000000UL
+#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
+
+static void read_safari_cfg(void *arg)
+{
+ unsigned long ret, *val = arg;
+
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=&r" (ret)
+ : "i" (ASI_SAFARI_CONFIG));
+ *val = ret;
+}
+
+static void update_safari_cfg(void *arg)
+{
+ unsigned long reg, *new_bits = arg;
+
+ read_safari_cfg(&reg);
+ reg &= ~SAFARI_CFG_DIV_MASK;
+ reg |= *new_bits;
+
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (reg), "i" (ASI_SAFARI_CONFIG)
+ : "memory");
+}
+
+static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
+{
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ unsigned long ret;
+
+ switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
+ case SAFARI_CFG_DIV_1:
+ ret = clock_tick / 1;
+ break;
+ case SAFARI_CFG_DIV_2:
+ ret = clock_tick / 2;
+ break;
+ case SAFARI_CFG_DIV_32:
+ ret = clock_tick / 32;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static unsigned int us3_freq_get(unsigned int cpu)
+{
+ unsigned long reg;
+
+ if (smp_call_function_single(cpu, read_safari_cfg, &reg, 1))
+ return 0;
+ return get_current_freq(cpu, reg);
+}
+
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long new_bits, new_freq;
+
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
+ switch (index) {
+ case 0:
+ new_bits = SAFARI_CFG_DIV_1;
+ new_freq /= 1;
+ break;
+ case 1:
+ new_bits = SAFARI_CFG_DIV_2;
+ new_freq /= 2;
+ break;
+ case 2:
+ new_bits = SAFARI_CFG_DIV_32;
+ new_freq /= 32;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
+}
+
+static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us3_freq_table[cpu].table[0];
+
+ table[0].driver_data = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].driver_data = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].driver_data = 2;
+ table[2].frequency = clock_tick / 32;
+ table[3].driver_data = 0;
+ table[3].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us3_driver)
+ us3_freq_target(policy, 0);
+
+ return 0;
+}
+
+static int __init us3_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != cheetah && tlb_type != cheetah_plus)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == CHEETAH_MANUF &&
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
+ GFP_KERNEL);
+ if (!us3_freq_table)
+ goto err_out;
+
+ driver->init = us3_freq_cpu_init;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us3_freq_target;
+ driver->get = us3_freq_get;
+ driver->exit = us3_freq_cpu_exit;
+ strcpy(driver->name, "UltraSPARC-III");
+
+ cpufreq_us3_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us3_driver = NULL;
+ }
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us3_freq_exit(void)
+{
+ if (cpufreq_us3_driver) {
+ cpufreq_unregister_driver(cpufreq_us3_driver);
+ kfree(cpufreq_us3_driver);
+ cpufreq_us3_driver = NULL;
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
+MODULE_LICENSE("GPL");
+
+module_init(us3_freq_init);
+module_exit(us3_freq_exit);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
new file mode 100644
index 000000000..7d0d62a06
--- /dev/null
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -0,0 +1,247 @@
+/*
+ * drivers/cpufreq/spear-cpufreq.c
+ *
+ * CPU Frequency Scaling for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* SPEAr CPUFreq driver data structure */
+static struct {
+ struct clk *clk;
+ unsigned int transition_latency;
+ struct cpufreq_frequency_table *freq_tbl;
+ u32 cnt;
+} spear_cpufreq;
+
+static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
+{
+ struct clk *sys_pclk;
+ int pclk;
+	/*
+	 * In SPEAr1340, the cpu clk's parent (sys clk) can take its input
+	 * from any of the following sources:
+	 */
+ const char *sys_clk_src[] = {
+ "sys_syn_clk",
+ "pll1_clk",
+ "pll2_clk",
+ "pll3_clk",
+ };
+
+	/*
+	 * Each possible sys clk source has its own frequency range, so
+	 * choose the source that can supply the requested rate.
+	 */
+ if (newfreq <= 300000000)
+ pclk = 0; /* src is sys_syn_clk */
+ else if (newfreq > 300000000 && newfreq <= 500000000)
+ pclk = 3; /* src is pll3_clk */
+ else if (newfreq == 600000000)
+ pclk = 1; /* src is pll1_clk */
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Get parent to sys clock */
+ sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
+ if (IS_ERR(sys_pclk))
+ pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
+
+ return sys_pclk;
+}
+
+/*
+ * On SPEAr1340 we cannot simply set newfreq on the cpu clock, because the
+ * required source clock may not currently be an ancestor of the cpu clock.
+ * So the source clock is brought to the target rate first, and the cpu's
+ * sys clock is then switched over to it.
+ */
+static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
+{
+ struct clk *sys_clk;
+ int ret = 0;
+
+ sys_clk = clk_get_parent(spear_cpufreq.clk);
+ if (IS_ERR(sys_clk)) {
+ pr_err("failed to get cpu's parent (sys) clock\n");
+ return PTR_ERR(sys_clk);
+ }
+
+ /* Set the rate of the source clock before changing the parent */
+ ret = clk_set_rate(sys_pclk, newfreq);
+ if (ret) {
+ pr_err("Failed to set sys clk rate to %lu\n", newfreq);
+ return ret;
+ }
+
+ ret = clk_set_parent(sys_clk, sys_pclk);
+ if (ret) {
+ pr_err("Failed to set sys clk parent\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ long newfreq;
+ struct clk *srcclk;
+ int ret, mult = 1;
+
+ newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
+ if (of_machine_is_compatible("st,spear1340")) {
+		/*
+		 * SPEAr1340 is special: the cpu clk's parent can be fed by
+		 * several clock sources, and different target frequencies may
+		 * need different sources, so pick the right one here.
+		 */
+ srcclk = spear1340_cpu_get_possible_parent(newfreq);
+ if (IS_ERR(srcclk)) {
+ pr_err("Failed to get src clk\n");
+ return PTR_ERR(srcclk);
+ }
+
+ /* SPEAr1340: src clk is always 2 * intended cpu clk */
+ mult = 2;
+ } else {
+		/*
+		 * The clock to be altered is already an ancestor of the cpu
+		 * clock, so we can work on the cpu clk directly.
+		 */
+ srcclk = spear_cpufreq.clk;
+ }
+
+ newfreq = clk_round_rate(srcclk, newfreq * mult);
+ if (newfreq <= 0) {
+ pr_err("clk_round_rate failed for cpu src clock\n");
+ return newfreq;
+ }
+
+ if (mult == 2)
+ ret = spear1340_set_cpu_rate(srcclk, newfreq);
+ else
+ ret = clk_set_rate(spear_cpufreq.clk, newfreq);
+
+ if (ret)
+ pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
+
+ return ret;
+}
+
+static int spear_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = spear_cpufreq.clk;
+ cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+ spear_cpufreq.transition_latency);
+ return 0;
+}
+
+static struct cpufreq_driver spear_cpufreq_driver = {
+ .name = "cpufreq-spear",
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = spear_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = spear_cpufreq_init,
+ .attr = cpufreq_generic_attr,
+};
+
+static int spear_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ const struct property *prop;
+ struct cpufreq_frequency_table *freq_tbl;
+ const __be32 *val;
+ int cnt, i, ret;
+
+ np = of_cpu_device_node_get(0);
+ if (!np) {
+ pr_err("No cpu node found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "clock-latency",
+ &spear_cpufreq.transition_latency))
+ spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+
+ prop = of_find_property(np, "cpufreq_tbl", NULL);
+ if (!prop || !prop->value) {
+ pr_err("Invalid cpufreq_tbl\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cnt = prop->length / sizeof(u32);
+ val = prop->value;
+
+ freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
+ if (!freq_tbl) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < cnt; i++)
+ freq_tbl[i].frequency = be32_to_cpup(val++);
+
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ spear_cpufreq.freq_tbl = freq_tbl;
+
+ of_node_put(np);
+
+ spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(spear_cpufreq.clk)) {
+ pr_err("Unable to get CPU clock\n");
+ ret = PTR_ERR(spear_cpufreq.clk);
+ goto out_put_mem;
+ }
+
+ ret = cpufreq_register_driver(&spear_cpufreq_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("failed register driver: %d\n", ret);
+ clk_put(spear_cpufreq.clk);
+
+out_put_mem:
+ kfree(freq_tbl);
+ return ret;
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static struct platform_driver spear_cpufreq_platdrv = {
+ .driver = {
+ .name = "spear-cpufreq",
+ },
+ .probe = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
+
+MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
+MODULE_DESCRIPTION("SPEAr CPUFreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
new file mode 100644
index 000000000..75b10ecdb
--- /dev/null
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
+ * M (part of the Centrino chipset).
+ *
+ * Since the original Pentium M, most new Intel CPUs support Enhanced
+ * SpeedStep.
+ *
+ * Despite the "SpeedStep" in the name, this is almost entirely unlike
+ * traditional SpeedStep.
+ *
+ * Modelled on speedstep.c
+ *
+ * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h> /* current */
+#include <linux/delay.h>
+#include <linux/compiler.h>
+#include <linux/gfp.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+#define MAINTAINER "linux-pm@vger.kernel.org"
+
+#define INTEL_MSR_RANGE (0xffff)
+
+struct cpu_id
+{
+ __u8 x86; /* CPU family */
+ __u8 x86_model; /* model */
+ __u8 x86_stepping; /* stepping */
+};
+
+enum {
+ CPU_BANIAS,
+ CPU_DOTHAN_A1,
+ CPU_DOTHAN_A2,
+ CPU_DOTHAN_B0,
+ CPU_MP4HT_D0,
+ CPU_MP4HT_E0,
+};
+
+static const struct cpu_id cpu_ids[] = {
+ [CPU_BANIAS] = { 6, 9, 5 },
+ [CPU_DOTHAN_A1] = { 6, 13, 1 },
+ [CPU_DOTHAN_A2] = { 6, 13, 2 },
+ [CPU_DOTHAN_B0] = { 6, 13, 6 },
+ [CPU_MP4HT_D0] = {15, 3, 4 },
+ [CPU_MP4HT_E0] = {15, 4, 1 },
+};
+#define N_IDS ARRAY_SIZE(cpu_ids)
+
+struct cpu_model
+{
+ const struct cpu_id *cpu_id;
+ const char *model_name;
+ unsigned max_freq; /* max clock in kHz */
+
+ struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
+};
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x);
+
+/* Operating points for current CPU */
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
+
+static struct cpufreq_driver centrino_driver;
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
+
+/* Computes the correct form for IA32_PERF_CTL MSR for a particular
+ frequency/voltage operating point; frequency in MHz, volts in mV.
+ This is stored as "driver_data" in the structure. */
+#define OP(mhz, mv) \
+ { \
+ .frequency = (mhz) * 1000, \
+ .driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16) \
+ }
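+/* For example, OP(600, 844) encodes .driver_data = (6 << 8) | 9 = 0x609. */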
+
+/*
+ * These voltage tables were derived from the Intel Pentium M
+ * datasheet, document 25261202.pdf, Table 5. I have verified they
+ * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
+ * M.
+ */
+
+/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
+static struct cpufreq_frequency_table banias_900[] =
+{
+ OP(600, 844),
+ OP(800, 988),
+ OP(900, 1004),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
+static struct cpufreq_frequency_table banias_1000[] =
+{
+ OP(600, 844),
+ OP(800, 972),
+ OP(900, 988),
+ OP(1000, 1004),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
+static struct cpufreq_frequency_table banias_1100[] =
+{
+ OP( 600, 956),
+ OP( 800, 1020),
+ OP( 900, 1100),
+ OP(1000, 1164),
+ OP(1100, 1180),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+
+/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
+static struct cpufreq_frequency_table banias_1200[] =
+{
+ OP( 600, 956),
+ OP( 800, 1004),
+ OP( 900, 1020),
+ OP(1000, 1100),
+ OP(1100, 1164),
+ OP(1200, 1180),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.30GHz (Banias) */
+static struct cpufreq_frequency_table banias_1300[] =
+{
+ OP( 600, 956),
+ OP( 800, 1260),
+ OP(1000, 1292),
+ OP(1200, 1356),
+ OP(1300, 1388),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.40GHz (Banias) */
+static struct cpufreq_frequency_table banias_1400[] =
+{
+ OP( 600, 956),
+ OP( 800, 1180),
+ OP(1000, 1308),
+ OP(1200, 1436),
+ OP(1400, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.50GHz (Banias) */
+static struct cpufreq_frequency_table banias_1500[] =
+{
+ OP( 600, 956),
+ OP( 800, 1116),
+ OP(1000, 1228),
+ OP(1200, 1356),
+ OP(1400, 1452),
+ OP(1500, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.60GHz (Banias) */
+static struct cpufreq_frequency_table banias_1600[] =
+{
+ OP( 600, 956),
+ OP( 800, 1036),
+ OP(1000, 1164),
+ OP(1200, 1276),
+ OP(1400, 1420),
+ OP(1600, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.70GHz (Banias) */
+static struct cpufreq_frequency_table banias_1700[] =
+{
+ OP( 600, 956),
+ OP( 800, 1004),
+ OP(1000, 1116),
+ OP(1200, 1228),
+ OP(1400, 1308),
+ OP(1700, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+#undef OP
+
+#define _BANIAS(cpuid, max, name) \
+{ .cpu_id = cpuid, \
+ .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
+ .max_freq = (max)*1000, \
+ .op_points = banias_##max, \
+}
+#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
+
+/* CPU models, their operating frequency range, and freq/voltage
+ operating points */
+static struct cpu_model models[] =
+{
+ _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
+ BANIAS(1000),
+ BANIAS(1100),
+ BANIAS(1200),
+ BANIAS(1300),
+ BANIAS(1400),
+ BANIAS(1500),
+ BANIAS(1600),
+ BANIAS(1700),
+
+ /* NULL model_name is a wildcard */
+ { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
+ { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
+ { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
+
+ { NULL, }
+};
+#undef _BANIAS
+#undef BANIAS
+
+static int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
+ struct cpu_model *model;
+
+ for(model = models; model->cpu_id != NULL; model++)
+ if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
+ (model->model_name == NULL ||
+ strcmp(cpu->x86_model_id, model->model_name) == 0))
+ break;
+
+ if (model->cpu_id == NULL) {
+ /* No match at all */
+ pr_debug("no support for CPU model \"%s\": "
+ "send /proc/cpuinfo to " MAINTAINER "\n",
+ cpu->x86_model_id);
+ return -ENOENT;
+ }
+
+ if (model->op_points == NULL) {
+ /* Matched a non-match */
+ pr_debug("no table support for CPU model \"%s\"\n",
+ cpu->x86_model_id);
+ pr_debug("try using the acpi-cpufreq driver\n");
+ return -ENOENT;
+ }
+
+ per_cpu(centrino_model, policy->cpu) = model;
+
+ pr_debug("found \"%s\": max frequency: %dkHz\n",
+ model->model_name, model->max_freq);
+
+ return 0;
+}
+
+#else
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
+
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x)
+{
+ if ((c->x86 == x->x86) &&
+ (c->x86_model == x->x86_model) &&
+ (c->x86_stepping == x->x86_stepping))
+ return 1;
+ return 0;
+}
+
+/* To be called only after centrino_model is initialized */
+static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
+{
+ int i;
+
+ /*
+ * Extract clock in kHz from PERF_CTL value
+ * for centrino, as some DSDTs are buggy.
+ * Ideally, this can be done using the acpi_data structure.
+ */
+ if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
+ msr = (msr >> 8) & 0xff;
+ return msr * 100000;
+ }
+
+ if ((!per_cpu(centrino_model, cpu)) ||
+ (!per_cpu(centrino_model, cpu)->op_points))
+ return 0;
+
+ msr &= 0xffff;
+ for (i = 0;
+ per_cpu(centrino_model, cpu)->op_points[i].frequency
+ != CPUFREQ_TABLE_END;
+ i++) {
+ if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
+ return per_cpu(centrino_model, cpu)->
+ op_points[i].frequency;
+ }
+ if (failsafe)
+ return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
+ else
+ return 0;
+}
+
+/* Return the current CPU frequency in kHz */
+static unsigned int get_cur_freq(unsigned int cpu)
+{
+ unsigned l, h;
+ unsigned clock_freq;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
+ clock_freq = extract_clock(l, cpu, 0);
+
+ if (unlikely(clock_freq == 0)) {
+ /*
+ * On some CPUs, we can see transient MSR values (which are
+ * not present in _PSS), while CPU is doing some automatic
+ * P-state transition (like TM2). Get the last freq set
+ * in PERF_CTL.
+ */
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
+ clock_freq = extract_clock(l, cpu, 1);
+ }
+ return clock_freq;
+}
+
+
+static int centrino_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
+ unsigned l, h;
+ int i;
+
+ /* Only Intel makes Enhanced Speedstep-capable CPUs */
+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+ !cpu_has(cpu, X86_FEATURE_EST))
+ return -ENODEV;
+
+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+ centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ for (i = 0; i < N_IDS; i++)
+ if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
+ break;
+
+ if (i != N_IDS)
+ per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
+
+ if (!per_cpu(centrino_cpu, policy->cpu)) {
+ pr_debug("found unsupported CPU with "
+ "Enhanced SpeedStep: send /proc/cpuinfo to "
+ MAINTAINER "\n");
+ return -ENODEV;
+ }
+
+ if (centrino_cpu_init_table(policy))
+ return -ENODEV;
+
+ /* Check to see if Enhanced SpeedStep is enabled, and try to
+ enable it if not. */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+ pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
+ wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ /* check to see if it stuck */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ pr_info("couldn't enable Enhanced SpeedStep\n");
+ return -ENODEV;
+ }
+ }
+
+ policy->cpuinfo.transition_latency = 10000;
+	/* 10 us transition latency */
+ policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
+
+ return 0;
+}
+
+static int centrino_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+
+ if (!per_cpu(centrino_model, cpu))
+ return -ENODEV;
+
+ per_cpu(centrino_model, cpu) = NULL;
+
+ return 0;
+}
+
+/**
+ * centrino_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @index: index of target frequency
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
+ int retval = 0;
+ unsigned int j, first_cpu;
+ struct cpufreq_frequency_table *op_points;
+ cpumask_var_t covered_cpus;
+
+ if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ first_cpu = 1;
+ op_points = &per_cpu(centrino_model, cpu)->op_points[index];
+ for_each_cpu(j, policy->cpus) {
+ int good_cpu;
+
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on CPU that wants to change freq
+ */
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ good_cpu = cpumask_any_and(policy->cpus,
+ cpu_online_mask);
+ else
+ good_cpu = j;
+
+ if (good_cpu >= nr_cpu_ids) {
+ pr_debug("couldn't limit to CPUs in this domain\n");
+ retval = -EAGAIN;
+ if (first_cpu) {
+ /* We haven't started the transition yet. */
+ goto out;
+ }
+ break;
+ }
+
+ msr = op_points->driver_data;
+
+ if (first_cpu) {
+ rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
+ if (msr == (oldmsr & 0xffff)) {
+ pr_debug("no change needed - msr was and needs "
+ "to be %x\n", oldmsr);
+ retval = 0;
+ goto out;
+ }
+
+ first_cpu = 0;
+ /* all but 16 LSB are reserved, treat them with care */
+ oldmsr &= ~0xffff;
+ msr &= 0xffff;
+ oldmsr |= msr;
+ }
+
+ wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
+
+ cpumask_set_cpu(j, covered_cpus);
+ }
+
+ if (unlikely(retval)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to policy->cpus and
+		 * MSRs have already been written on covered_cpus.
+ * Best effort undo..
+ */
+
+ for_each_cpu(j, covered_cpus)
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
+ }
+ retval = 0;
+
+out:
+ free_cpumask_var(covered_cpus);
+ return retval;
+}
+
+static struct cpufreq_driver centrino_driver = {
+ .name = "centrino", /* should be speedstep-centrino,
+ but there's a 16 char limit */
+ .init = centrino_cpu_init,
+ .exit = centrino_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = centrino_target,
+ .get = get_cur_freq,
+ .attr = cpufreq_generic_attr,
+};
+
+/*
+ * This doesn't replace the detailed checks above because
+ * the generic CPU IDs don't have a way to match for steppings
+ * or ASCII model IDs.
+ */
+static const struct x86_cpu_id centrino_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
+ {}
+};
+
+/**
+ * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
+ *
+ * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
+ * unsupported devices, -ENOENT if there's no voltage table for this
+ * particular CPU model, -EINVAL on problems during initialization,
+ * and zero on success.
+ *
+ * This is quite picky. Not only does the CPU have to advertise the
+ * "est" flag in the cpuid capability flags, we look for a specific
+ * CPU model and stepping, and we need to have the exact model name in
+ * our voltage tables. That is, be paranoid about not releasing
+ * someone's valuable magic smoke.
+ */
+static int __init centrino_init(void)
+{
+ if (!x86_match_cpu(centrino_ids))
+ return -ENODEV;
+ return cpufreq_register_driver(&centrino_driver);
+}
+
+static void __exit centrino_exit(void)
+{
+ cpufreq_unregister_driver(&centrino_driver);
+}
+
+MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
+MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
+MODULE_LICENSE ("GPL");
+
+late_initcall(centrino_init);
+module_exit(centrino_exit);
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
new file mode 100644
index 000000000..f2076d72b
--- /dev/null
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2001 Dave Jones, Arjan van de ven.
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Based upon reverse engineered information, and on Intel documentation
+ * for chipsets ICH2-M and ICH3-M.
+ *
+ * Many thanks to Ducrot Bruno for finding and fixing the last
+ * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
+ * for extensive testing.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+
+/*********************************************************************
+ * SPEEDSTEP - DEFINITIONS *
+ *********************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+
+/* speedstep_chipset:
+ * It is necessary to know which chipset is used. As accesses to
+ * this device occur at various places in this module, we need a
+ * static struct pci_dev * pointing to that device.
+ */
+static struct pci_dev *speedstep_chipset_dev;
+
+
+/* speedstep_processor
+ */
+static enum speedstep_processor speedstep_processor;
+
+static u32 pmbase;
+
+/*
+ * There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+static struct cpufreq_frequency_table speedstep_freqs[] = {
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+
+/**
+ * speedstep_find_register - read the PMBASE address
+ *
+ * Returns: -ENODEV if no register could be found
+ */
+static int speedstep_find_register(void)
+{
+ if (!speedstep_chipset_dev)
+ return -ENODEV;
+
+ /* get PMBASE */
+ pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
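+	/* Bit 0 flags an I/O-space mapping and must be set; the remaining bits give the base address. */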
+ if (!(pmbase & 0x01)) {
+ pr_err("could not find speedstep register\n");
+ return -ENODEV;
+ }
+
+ pmbase &= 0xFFFFFFFE;
+ if (!pmbase) {
+ pr_err("could not find speedstep register\n");
+ return -ENODEV;
+ }
+
+ pr_debug("pmbase is 0x%x\n", pmbase);
+ return 0;
+}
+
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ * Tries to change the SpeedStep state. Can be called from
+ * smp_call_function_single.
+ */
+static void speedstep_set_state(unsigned int state)
+{
+ u8 pm2_blk;
+ u8 value;
+ unsigned long flags;
+
+ if (state > 0x1)
+ return;
+
+ /* Disable IRQs */
+ local_irq_save(flags);
+
+ /* read state */
+ value = inb(pmbase + 0x50);
+
+ pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+ /* write new state */
+ value &= 0xFE;
+ value |= state;
+
+ pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
+
+ /* Disable bus master arbitration */
+ pm2_blk = inb(pmbase + 0x20);
+ pm2_blk |= 0x01;
+ outb(pm2_blk, (pmbase + 0x20));
+
+ /* Actual transition */
+ outb(value, (pmbase + 0x50));
+
+ /* Restore bus master arbitration */
+ pm2_blk &= 0xfe;
+ outb(pm2_blk, (pmbase + 0x20));
+
+ /* check if transition was successful */
+ value = inb(pmbase + 0x50);
+
+ /* Enable IRQs */
+ local_irq_restore(flags);
+
+ pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+ if (state == (value & 0x1))
+ pr_debug("change to %u MHz succeeded\n",
+ speedstep_get_frequency(speedstep_processor) / 1000);
+ else
+ pr_err("change failed - I/O error\n");
+
+ return;
+}
+
+/* Wrapper for smp_call_function_single. */
+static void _speedstep_set_state(void *_state)
+{
+ speedstep_set_state(*(unsigned int *)_state);
+}
+
+/**
+ * speedstep_activate - activate SpeedStep control in the chipset
+ *
+ * Tries to activate the SpeedStep status and control registers.
+ * Returns -EINVAL on an unsupported chipset, and zero on success.
+ */
+static int speedstep_activate(void)
+{
+ u16 value = 0;
+
+ if (!speedstep_chipset_dev)
+ return -EINVAL;
+
+ pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
+ if (!(value & 0x08)) {
+ value |= 0x08;
+ pr_debug("activating SpeedStep (TM) registers\n");
+ pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
+ }
+
+ return 0;
+}
+
+
+/**
+ * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
+ *
+ * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
+ * the LPC bridge / PM module which contains all power-management
+ * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
+ * chipset, or zero on failure.
+ */
+static unsigned int speedstep_detect_chipset(void)
+{
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801DB_12,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev)
+ return 4; /* 4-M */
+
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801CA_12,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev)
+ return 3; /* 3-M */
+
+
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801BA_10,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev) {
+ /* speedstep.c causes lockups on Dell Inspirons 8000 and
+ * 8100 which use a pretty old revision of the 82815
+ * host bridge. Abort on these systems.
+ */
+ struct pci_dev *hostbridge;
+
+ hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82815_MC,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+
+ if (!hostbridge)
+ return 2; /* 2-M */
+
+ if (hostbridge->revision < 5) {
+ pr_debug("hostbridge does not support speedstep\n");
+ speedstep_chipset_dev = NULL;
+ pci_dev_put(hostbridge);
+ return 0;
+ }
+
+ pci_dev_put(hostbridge);
+ return 2; /* 2-M */
+ }
+
+ return 0;
+}
+
+static void get_freq_data(void *_speed)
+{
+ unsigned int *speed = _speed;
+
+ *speed = speedstep_get_frequency(speedstep_processor);
+}
+
+static unsigned int speedstep_get(unsigned int cpu)
+{
+ unsigned int speed;
+
+ /* You're supposed to ensure CPU is online. */
+ BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
+
+ pr_debug("detected %u kHz as current frequency\n", speed);
+ return speed;
+}
+
+/**
+ * speedstep_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @index: index of target frequency
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int policy_cpu;
+
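+	/* The transition must run on a CPU covered by this policy; pick any online one. */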
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
+ true);
+
+ return 0;
+}
+
+
+struct get_freqs {
+ struct cpufreq_policy *policy;
+ int ret;
+};
+
+static void get_freqs_on_cpu(void *_get_freqs)
+{
+ struct get_freqs *get_freqs = _get_freqs;
+
+ get_freqs->ret =
+ speedstep_get_freqs(speedstep_processor,
+ &speedstep_freqs[SPEEDSTEP_LOW].frequency,
+ &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+ &get_freqs->policy->cpuinfo.transition_latency,
+ &speedstep_set_state);
+}
+
+static int speedstep_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int policy_cpu;
+ struct get_freqs gf;
+
+ /* only run on CPU to be set, or on its sibling */
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
+#endif
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+
+ /* detect low and high frequency and transition latency */
+ gf.policy = policy;
+ smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
+ if (gf.ret)
+ return gf.ret;
+
+ policy->freq_table = speedstep_freqs;
+
+ return 0;
+}
+
+
+static struct cpufreq_driver speedstep_driver = {
+ .name = "speedstep-ich",
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
+ .init = speedstep_cpu_init,
+ .get = speedstep_get,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id ss_smi_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
+ {}
+};
+
+/**
+ * speedstep_init - initializes the SpeedStep CPUFreq driver
+ *
+ * Initializes the SpeedStep support. Returns -ENODEV on unsupported
+ * devices, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init speedstep_init(void)
+{
+ if (!x86_match_cpu(ss_smi_ids))
+ return -ENODEV;
+
+ /* detect processor */
+ speedstep_processor = speedstep_detect_processor();
+ if (!speedstep_processor) {
+ pr_debug("Intel(R) SpeedStep(TM) capable processor "
+ "not found\n");
+ return -ENODEV;
+ }
+
+ /* detect chipset */
+ if (!speedstep_detect_chipset()) {
+ pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
+ "(yet) available.\n");
+ return -ENODEV;
+ }
+
+ /* activate speedstep support */
+ if (speedstep_activate()) {
+ pci_dev_put(speedstep_chipset_dev);
+ return -EINVAL;
+ }
+
+ if (speedstep_find_register())
+ return -ENODEV;
+
+ return cpufreq_register_driver(&speedstep_driver);
+}
+
+
+/**
+ * speedstep_exit - unregisters SpeedStep support
+ *
+ * Unregisters SpeedStep support.
+ */
+static void __exit speedstep_exit(void)
+{
+ pci_dev_put(speedstep_chipset_dev);
+ cpufreq_unregister_driver(&speedstep_driver);
+}
+
+
+MODULE_AUTHOR("Dave Jones, Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
+ "with ICH-M southbridges.");
+MODULE_LICENSE("GPL");
+
+module_init(speedstep_init);
+module_exit(speedstep_exit);
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
new file mode 100644
index 000000000..0b66df4ed
--- /dev/null
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Library for common functions for Intel SpeedStep v.1 and v.2 support
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+
+#include <asm/msr.h>
+#include <asm/tsc.h>
+#include "speedstep-lib.h"
+
+#define PFX "speedstep-lib: "
+
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+static int relaxed_check;
+#else
+#define relaxed_check 0
+#endif
+
+/*********************************************************************
+ * GET PROCESSOR CORE SPEED IN KHZ *
+ *********************************************************************/
+
+static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
+{
+ /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
+ static const struct {
+ unsigned int ratio; /* Frequency Multiplier (x10) */
+ u8 bitmap; /* power on configuration bits
+ [27, 25:22] (in MSR 0x2a) */
+ } msr_decode_mult[] = {
+ { 30, 0x01 },
+ { 35, 0x05 },
+ { 40, 0x02 },
+ { 45, 0x06 },
+ { 50, 0x00 },
+ { 55, 0x04 },
+ { 60, 0x0b },
+ { 65, 0x0f },
+ { 70, 0x09 },
+ { 75, 0x0d },
+ { 80, 0x0a },
+ { 85, 0x26 },
+ { 90, 0x20 },
+ { 100, 0x2b },
+ { 0, 0xff } /* error or unknown value */
+ };
+
+ /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
+ static const struct {
+ unsigned int value; /* Front Side Bus speed in MHz */
+ u8 bitmap; /* power on configuration bits [18: 19]
+ (in MSR 0x2a) */
+ } msr_decode_fsb[] = {
+ { 66, 0x0 },
+ { 100, 0x2 },
+ { 133, 0x1 },
+ { 0, 0xff}
+ };
+
+ u32 msr_lo, msr_tmp;
+ int i = 0, j = 0;
+
+ /* read MSR 0x2a - we only need the low 32 bits */
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+ msr_tmp = msr_lo;
+
+ /* decode the FSB */
+ msr_tmp &= 0x00c0000;
+ msr_tmp >>= 18;
+ while (msr_tmp != msr_decode_fsb[i].bitmap) {
+ if (msr_decode_fsb[i].bitmap == 0xff)
+ return 0;
+ i++;
+ }
+
+ /* decode the multiplier */
+ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) {
+ pr_debug("workaround for early PIIIs\n");
+ msr_lo &= 0x03c00000;
+ } else
+ msr_lo &= 0x0bc00000;
+ msr_lo >>= 22;
+ while (msr_lo != msr_decode_mult[j].bitmap) {
+ if (msr_decode_mult[j].bitmap == 0xff)
+ return 0;
+ j++;
+ }
+
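+ /*
+ * Worked example using the tables above: a multiplier bitmap of 0x02
+ * decodes to ratio 40 (i.e. 4.0x) and an FSB bitmap of 0x2 decodes to
+ * 100 MHz, so the value returned below is 40 * 100 * 100 = 400000 kHz
+ * (400 MHz).
+ */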
+ pr_debug("speed is %u\n",
+ (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
+
+ return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100;
+}
+
+
+static unsigned int pentiumM_get_frequency(void)
+{
+ u32 msr_lo, msr_tmp;
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+
+ /* see table B-2 of 24547212.pdf */
+ if (msr_lo & 0x00040000) {
+ printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n",
+ msr_lo, msr_tmp);
+ return 0;
+ }
+
+ msr_tmp = (msr_lo >> 22) & 0x1f;
+ pr_debug("bits 22-26 are 0x%x, speed is %u\n",
+ msr_tmp, (msr_tmp * 100 * 1000));
+
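+ /* e.g. a ratio field of 13 reports 13 * 100 * 1000 = 1300000 kHz */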
+ return msr_tmp * 100 * 1000;
+}
+
+static unsigned int pentium_core_get_frequency(void)
+{
+ u32 fsb = 0;
+ u32 msr_lo, msr_tmp;
+ int ret;
+
+ rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
+ /* see table B-2 of 25366920.pdf */
+ switch (msr_lo & 0x07) {
+ case 5:
+ fsb = 100000;
+ break;
+ case 1:
+ fsb = 133333;
+ break;
+ case 3:
+ fsb = 166667;
+ break;
+ case 2:
+ fsb = 200000;
+ break;
+ case 0:
+ fsb = 266667;
+ break;
+ case 4:
+ fsb = 333333;
+ break;
+ default:
+ pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
+ }
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n",
+ msr_lo, msr_tmp);
+
+ msr_tmp = (msr_lo >> 22) & 0x1f;
+ pr_debug("bits 22-26 are 0x%x, speed is %u\n",
+ msr_tmp, (msr_tmp * fsb));
+
+ ret = (msr_tmp * fsb);
+ return ret;
+}
+
+
+static unsigned int pentium4_get_frequency(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 msr_lo, msr_hi, mult;
+ unsigned int fsb = 0;
+ unsigned int ret;
+ u8 fsb_code;
+
+ /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency
+ * to System Bus Frequency Ratio Field in the Processor Frequency
+ * Configuration Register of the MSR. Therefore the current
+ * frequency cannot be calculated and has to be measured.
+ */
+ if (c->x86_model < 2)
+ return cpu_khz;
+
+ rdmsr(0x2c, msr_lo, msr_hi);
+
+ pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
+
+ /* decode the FSB: see IA-32 Intel (C) Architecture Software
+ * Developer's Manual, Volume 3: System Programming Guide,
+ * revision #12 in Table B-1: MSRs in the Pentium 4 and
+ * Intel Xeon Processors, on page B-4 and B-5.
+ */
+ fsb_code = (msr_lo >> 16) & 0x7;
+ switch (fsb_code) {
+ case 0:
+ fsb = 100 * 1000;
+ break;
+ case 1:
+ fsb = 13333 * 10;
+ break;
+ case 2:
+ fsb = 200 * 1000;
+ break;
+ }
+
+ if (!fsb)
+ printk(KERN_DEBUG PFX "couldn't detect FSB speed. "
+ "Please send an e-mail to <linux@brodo.de>\n");
+
+ /* Multiplier. */
+ mult = msr_lo >> 24;
+
+ pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n",
+ fsb, mult, (fsb * mult));
+
+ ret = (fsb * mult);
+ return ret;
+}
+
+
+/* Warning: may get called from smp_call_function_single. */
+unsigned int speedstep_get_frequency(enum speedstep_processor processor)
+{
+ switch (processor) {
+ case SPEEDSTEP_CPU_PCORE:
+ return pentium_core_get_frequency();
+ case SPEEDSTEP_CPU_PM:
+ return pentiumM_get_frequency();
+ case SPEEDSTEP_CPU_P4D:
+ case SPEEDSTEP_CPU_P4M:
+ return pentium4_get_frequency();
+ case SPEEDSTEP_CPU_PIII_T:
+ case SPEEDSTEP_CPU_PIII_C:
+ case SPEEDSTEP_CPU_PIII_C_EARLY:
+ return pentium3_get_frequency(processor);
+ default:
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(speedstep_get_frequency);
+
+
+/*********************************************************************
+ * DETECT SPEEDSTEP-CAPABLE PROCESSOR *
+ *********************************************************************/
+
+/* Keep in sync with the x86_cpu_id tables in the different modules */
+enum speedstep_processor speedstep_detect_processor(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ u32 ebx, msr_lo, msr_hi;
+
+ pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model);
+
+ if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+ ((c->x86 != 6) && (c->x86 != 0xF)))
+ return 0;
+
+ if (c->x86 == 0xF) {
+ /* Intel Mobile Pentium 4-M
+ * or Intel Mobile Pentium 4 with 533 MHz FSB */
+ if (c->x86_model != 2)
+ return 0;
+
+ ebx = cpuid_ebx(0x00000001);
+ ebx &= 0x000000FF;
+
+ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
+
+ switch (c->x86_stepping) {
+ case 4:
+ /*
+ * B-stepping [M-P4-M]
+ * sample has ebx = 0x0f, production has 0x0e.
+ */
+ if ((ebx == 0x0e) || (ebx == 0x0f))
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ case 7:
+ /*
+ * C-stepping [M-P4-M]
+ * needs to have ebx=0x0e, else it's a celeron:
+ * cf. 25130917.pdf / page 7, footnote 5 even
+ * though 25072120.pdf / page 7 doesn't say
+ * samples are only of B-stepping...
+ */
+ if (ebx == 0x0e)
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ case 9:
+ /*
+ * D-stepping [M-P4-M or M-P4/533]
+ *
+ * this is totally strange: CPUID 0x0F29 is
+ * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
+ * The latter need to be sorted out as they don't
+ * support speedstep.
+ * Celerons with CPUID 0x0F29 may have either
+ * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
+ * specific.
+ * M-P4-Ms may have either ebx=0xe or 0xf [see above]
+ * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
+ * also, M-P4M HTs have ebx=0x8, too
+ * For now, they are distinguished by the model_id
+ * string
+ */
+ if ((ebx == 0x0e) ||
+ (strstr(c->x86_model_id,
+ "Mobile Intel(R) Pentium(R) 4") != NULL))
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+
+ switch (c->x86_model) {
+ case 0x0B: /* Intel PIII [Tualatin] */
+ /* cpuid_ebx(1) is 0x04 for desktop PIII,
+ * 0x06 for mobile PIII-M */
+ ebx = cpuid_ebx(0x00000001);
+ pr_debug("ebx is %x\n", ebx);
+
+ ebx &= 0x000000FF;
+
+ if (ebx != 0x06)
+ return 0;
+
+ /* So far all PIII-M processors support SpeedStep. See
+ * Intel's 24540640.pdf of June 2003
+ */
+ return SPEEDSTEP_CPU_PIII_T;
+
+ case 0x08: /* Intel PIII [Coppermine] */
+
+ /* all mobile PIII Coppermines have FSB 100 MHz
+ * ==> sort out a few desktop PIIIs. */
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
+ pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n",
+ msr_lo, msr_hi);
+ msr_lo &= 0x00c0000;
+ if (msr_lo != 0x0080000)
+ return 0;
+
+ /*
+ * If the processor is a mobile version,
+ * the platform ID has bit 50 set;
+ * it has SpeedStep technology if either
+ * bit 56 or 57 is set.
+ */
+ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
+ pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n",
+ msr_lo, msr_hi);
+ if ((msr_hi & (1<<18)) &&
+ (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
+ if (c->x86_stepping == 0x01) {
+ pr_debug("early PIII version\n");
+ return SPEEDSTEP_CPU_PIII_C_EARLY;
+ } else
+ return SPEEDSTEP_CPU_PIII_C;
+ }
+ fallthrough;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(speedstep_detect_processor);
+
+
+/*********************************************************************
+ * DETECT SPEEDSTEP SPEEDS *
+ *********************************************************************/
+
+unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state) (unsigned int state))
+{
+ unsigned int prev_speed;
+ unsigned int ret = 0;
+ unsigned long flags;
+ ktime_t tv1, tv2;
+
+ if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
+ return -EINVAL;
+
+ pr_debug("trying to determine both speeds\n");
+
+ /* get current speed */
+ prev_speed = speedstep_get_frequency(processor);
+ if (!prev_speed)
+ return -EIO;
+
+ pr_debug("previous speed is %u\n", prev_speed);
+
+ preempt_disable();
+ local_irq_save(flags);
+
+ /* switch to low state */
+ set_state(SPEEDSTEP_LOW);
+ *low_speed = speedstep_get_frequency(processor);
+ if (!*low_speed) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_debug("low speed is %u\n", *low_speed);
+
+ /* start latency measurement */
+ if (transition_latency)
+ tv1 = ktime_get();
+
+ /* switch to high state */
+ set_state(SPEEDSTEP_HIGH);
+
+ /* end latency measurement */
+ if (transition_latency)
+ tv2 = ktime_get();
+
+ *high_speed = speedstep_get_frequency(processor);
+ if (!*high_speed) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_debug("high speed is %u\n", *high_speed);
+
+ if (*low_speed == *high_speed) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* switch to previous state, if necessary */
+ if (*high_speed != prev_speed)
+ set_state(SPEEDSTEP_LOW);
+
+ if (transition_latency) {
+ *transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
+ pr_debug("transition latency is %u uSec\n", *transition_latency);
+
+ /* convert uSec to nSec and add 20% for safety reasons */
+ *transition_latency *= 1200;
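+ /* e.g. a measured 100 uSec transition becomes 100 * 1200 = 120000 nSec */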
+
+ /* check if the latency measurement is too high or too low
+ * and set it to a safe value (500uSec) in that case
+ */
+ if (*transition_latency > 10000000 ||
+ *transition_latency < 50000) {
+ pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
+ *transition_latency, 500000);
+ *transition_latency = 500000;
+ }
+ }
+
+out:
+ local_irq_restore(flags);
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+module_param(relaxed_check, int, 0444);
+MODULE_PARM_DESC(relaxed_check,
+ "Don't do all checks for speedstep capability.");
+#endif
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
new file mode 100644
index 000000000..dc762ea78
--- /dev/null
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Library for common functions for Intel SpeedStep v.1 and v.2 support
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+
+
+/* processors */
+enum speedstep_processor {
+ SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
+ SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
+/* the following processors are not speedstep-capable and are not auto-detected
+ * in speedstep_detect_processor(). However, their speed can be detected using
+ * the speedstep_get_frequency() call. */
+ SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
+ SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
+ SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
+};
+
+/* speedstep states -- only two of them */
+
+#define SPEEDSTEP_HIGH 0x00000000
+#define SPEEDSTEP_LOW 0x00000001
+
+
+/* detect a speedstep-capable processor */
+extern enum speedstep_processor speedstep_detect_processor(void);
+
+/* detect the current speed (in khz) of the processor */
+extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
+
+
+/* detect the low and high speeds of the processor. The callback
+ * set_state's first argument is either SPEEDSTEP_HIGH or
+ * SPEEDSTEP_LOW; the second argument is zero so that no
+ * cpufreq_notify_transition calls are initiated.
+ */
+extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state) (unsigned int state));
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
new file mode 100644
index 000000000..0ce9d4b6d
--- /dev/null
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel SpeedStep SMI driver.
+ *
+ * (C) 2003 Hiroshi Miura <miura@da-cha.org>
+ */
+
+
+/*********************************************************************
+ * SPEEDSTEP - DEFINITIONS *
+ *********************************************************************/
+
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <asm/ist.h>
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+/* speedstep system management interface port/command.
+ *
+ * These parameters are obtained from the IST-SMI BIOS call.
+ * If the user supplies them, the supplied values are used instead.
+ *
+ */
+static int smi_port;
+static int smi_cmd;
+static unsigned int smi_sig;
+
+/* info about the processor */
+static enum speedstep_processor speedstep_processor;
+
+/*
+ * There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+static struct cpufreq_frequency_table speedstep_freqs[] = {
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+#define GET_SPEEDSTEP_OWNER 0
+#define GET_SPEEDSTEP_STATE 1
+#define SET_SPEEDSTEP_STATE 2
+#define GET_SPEEDSTEP_FREQS 4
+
+/* how often shall the SMI call be tried if it failed, e.g. because
+ * of DMA activity going on? */
+#define SMI_TRIES 5
+
+/**
+ * speedstep_smi_ownership
+ */
+static int speedstep_smi_ownership(void)
+{
+ u32 command, result, magic, dummy;
+ u32 function = GET_SPEEDSTEP_OWNER;
+ unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+ magic = virt_to_phys(magic_data);
+
+ pr_debug("trying to obtain ownership with command %x at port %x\n",
+ command, smi_port);
+
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp\n"
+ : "=D" (result),
+ "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
+ "=S" (dummy)
+ : "a" (command), "b" (function), "c" (0), "d" (smi_port),
+ "D" (0), "S" (magic)
+ : "memory"
+ );
+
+ pr_debug("result is %x\n", result);
+
+ return result;
+}
+
+/**
+ * speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
+ * @low: the low frequency value is placed here
+ * @high: the high frequency value is placed here
+ *
+ * Only available on later SpeedStep-enabled systems, returns false results or
+ * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
+ * shows that the latter occurs if !(ist_info.event & 0xFFFF).
+ */
+static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
+{
+ u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
+ u32 state = 0;
+ u32 function = GET_SPEEDSTEP_FREQS;
+
+ if (!(ist_info.event & 0xFFFF)) {
+ pr_debug("bug #1422 -- can't read freqs from BIOS\n");
+ return -ENODEV;
+ }
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+
+ pr_debug("trying to determine frequencies with command %x at port %x\n",
+ command, smi_port);
+
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp"
+ : "=a" (result),
+ "=b" (high_mhz),
+ "=c" (low_mhz),
+ "=d" (state), "=D" (edi), "=S" (dummy)
+ : "a" (command),
+ "b" (function),
+ "c" (state),
+ "d" (smi_port), "S" (0), "D" (0)
+ );
+
+ pr_debug("result %x, low_freq %u, high_freq %u\n",
+ result, low_mhz, high_mhz);
+
+ /* abort if results are obviously incorrect... */
+ if ((high_mhz + low_mhz) < 600)
+ return -EINVAL;
+
+ *high = high_mhz * 1000;
+ *low = low_mhz * 1000;
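+ /*
+ * Example with assumed BIOS-reported values: low_mhz = 600 and
+ * high_mhz = 1000 yield *low = 600000 kHz and *high = 1000000 kHz.
+ */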
+
+ return result;
+}
+
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ */
+static void speedstep_set_state(unsigned int state)
+{
+ unsigned int result = 0, command, new_state, dummy;
+ unsigned long flags;
+ unsigned int function = SET_SPEEDSTEP_STATE;
+ unsigned int retry = 0;
+
+ if (state > 0x1)
+ return;
+
+ /* Disable IRQs */
+ preempt_disable();
+ local_irq_save(flags);
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+
+ pr_debug("trying to set frequency to state %u "
+ "with command %x at port %x\n",
+ state, command, smi_port);
+
+ do {
+ if (retry) {
+ /*
+ * We need to enable interrupts, otherwise the blockage
+ * won't resolve.
+ *
+ * We disable preemption so that other processes don't
+ * run. If other processes were running, they could
+ * submit more DMA requests, making the blockage worse.
+ */
+ pr_debug("retry %u, previous result %u, waiting...\n",
+ retry, result);
+ local_irq_enable();
+ mdelay(retry * 50);
+ local_irq_disable();
+ }
+ retry++;
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp"
+ : "=b" (new_state), "=D" (result),
+ "=c" (dummy), "=a" (dummy),
+ "=d" (dummy), "=S" (dummy)
+ : "a" (command), "b" (function), "c" (state),
+ "d" (smi_port), "S" (0), "D" (0)
+ );
+ } while ((new_state != state) && (retry <= SMI_TRIES));
+
+ /* enable IRQs */
+ local_irq_restore(flags);
+ preempt_enable();
+
+ if (new_state == state)
+ pr_debug("change to %u MHz succeeded after %u tries "
+ "with result %u\n",
+ (speedstep_freqs[new_state].frequency / 1000),
+ retry, result);
+ else
+ pr_err("change to state %u failed with new_state %u and result %u\n",
+ state, new_state, result);
+
+ return;
+}
+
+
+/**
+ * speedstep_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @index: index of new freq
+ *
+ * Sets a new CPUFreq policy/freq.
+ */
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ speedstep_set_state(index);
+
+ return 0;
+}
+
+
+static int speedstep_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+ unsigned int *low, *high;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ result = speedstep_smi_ownership();
+ if (result) {
+ pr_debug("fails in acquiring ownership of a SMI interface.\n");
+ return -EINVAL;
+ }
+
+ /* detect low and high frequency */
+ low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
+ high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
+
+ result = speedstep_smi_get_freqs(low, high);
+ if (result) {
+ /* fall back to speedstep_lib.c detection mechanism:
+ * try both states out */
+ pr_debug("could not detect low and high frequencies "
+ "by SMI call.\n");
+ result = speedstep_get_freqs(speedstep_processor,
+ low, high,
+ NULL,
+ &speedstep_set_state);
+
+ if (result) {
+ pr_debug("could not detect two different speeds"
+ " -- aborting.\n");
+ return result;
+ } else
+ pr_debug("workaround worked.\n");
+ }
+
+ policy->freq_table = speedstep_freqs;
+
+ return 0;
+}
+
+static unsigned int speedstep_get(unsigned int cpu)
+{
+ if (cpu)
+ return -ENODEV;
+ return speedstep_get_frequency(speedstep_processor);
+}
+
+
+static int speedstep_resume(struct cpufreq_policy *policy)
+{
+ int result = speedstep_smi_ownership();
+
+ if (result)
+ pr_debug("fails in re-acquiring ownership of a SMI interface.\n");
+
+ return result;
+}
+
+static struct cpufreq_driver speedstep_driver = {
+ .name = "speedstep-smi",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
+ .init = speedstep_cpu_init,
+ .get = speedstep_get,
+ .resume = speedstep_resume,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id ss_smi_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
+ {}
+};
+
+/**
+ * speedstep_init - initializes the SpeedStep CPUFreq driver
+ *
+ * Initializes the SpeedStep support. Returns -ENODEV on unsupported
+ * BIOS, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init speedstep_init(void)
+{
+ if (!x86_match_cpu(ss_smi_ids))
+ return -ENODEV;
+
+ speedstep_processor = speedstep_detect_processor();
+
+ switch (speedstep_processor) {
+ case SPEEDSTEP_CPU_PIII_T:
+ case SPEEDSTEP_CPU_PIII_C:
+ case SPEEDSTEP_CPU_PIII_C_EARLY:
+ break;
+ default:
+ speedstep_processor = 0;
+ }
+
+ if (!speedstep_processor) {
+ pr_debug("No supported Intel CPU detected.\n");
+ return -ENODEV;
+ }
+
+ pr_debug("signature:0x%.8x, command:0x%.8x, "
+ "event:0x%.8x, perf_level:0x%.8x.\n",
+ ist_info.signature, ist_info.command,
+ ist_info.event, ist_info.perf_level);
+
+ /* Error if no IST-SMI BIOS or no PARM
+ sig= 'ISGE' aka 'Intel Speedstep Gate E' */
+ if ((ist_info.signature != 0x47534943) && (
+ (smi_port == 0) || (smi_cmd == 0)))
+ return -ENODEV;
+
+ if (smi_sig == 1)
+ smi_sig = 0x47534943;
+ else
+ smi_sig = ist_info.signature;
+
+ /* setup smi_port from MODULE_PARM or BIOS */
+ if ((smi_port > 0xff) || (smi_port < 0))
+ return -EINVAL;
+ else if (smi_port == 0)
+ smi_port = ist_info.command & 0xff;
+
+ if ((smi_cmd > 0xff) || (smi_cmd < 0))
+ return -EINVAL;
+ else if (smi_cmd == 0)
+ smi_cmd = (ist_info.command >> 16) & 0xff;
+
+ return cpufreq_register_driver(&speedstep_driver);
+}
+
+
+/**
+ * speedstep_exit - unregisters SpeedStep support
+ *
+ * Unregisters SpeedStep support.
+ */
+static void __exit speedstep_exit(void)
+{
+ cpufreq_unregister_driver(&speedstep_driver);
+}
+
+module_param_hw(smi_port, int, ioport, 0444);
+module_param(smi_cmd, int, 0444);
+module_param(smi_sig, uint, 0444);
+
+MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
+ "-- Intel's default setting is 0xb2");
+MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
+ "-- Intel's default setting is 0x82");
+MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
+ "SMI interface.");
+
+MODULE_AUTHOR("Hiroshi Miura");
+MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
+MODULE_LICENSE("GPL");
+
+module_init(speedstep_init);
+module_exit(speedstep_exit);
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
new file mode 100644
index 000000000..1a63aeea8
--- /dev/null
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Match running platform with pre-defined OPP values for CPUFreq
+ *
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ * Lee Jones <lee.jones@linaro.org>
+ *
+ * Copyright (C) 2015 STMicroelectronics (R&D) Limited
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+
+#define VERSION_ELEMENTS 3
+#define MAX_PCODE_NAME_LEN 7
+
+#define VERSION_SHIFT 28
+#define HW_INFO_INDEX 1
+#define MAJOR_ID_INDEX 1
+#define MINOR_ID_INDEX 2
+
+/*
+ * Only match on "suitable for ALL versions" entries
+ *
+ * This will be used with the BIT() macro. It sets the
+ * top bit of a 32bit value and is equal to 0x80000000.
+ */
+#define DEFAULT_VERSION 31
+
+enum {
+ PCODE = 0,
+ SUBSTRATE,
+ DVFS_MAX_REGFIELDS,
+};
+
+/**
+ * struct sti_cpufreq_ddata - ST CPUFreq Driver Data
+ *
+ * @cpu: CPU's OF node
+ * @syscfg_eng: Engineering Syscon register map
+ * @syscfg: Syscon register map
+ */
+static struct sti_cpufreq_ddata {
+ struct device *cpu;
+ struct regmap *syscfg_eng;
+ struct regmap *syscfg;
+} ddata;
+
+static int sti_cpufreq_fetch_major(void)
+{
+ struct device_node *np = ddata.cpu->of_node;
+ struct device *dev = ddata.cpu;
+ unsigned int major_offset;
+ unsigned int socid;
+ int ret;
+
+ ret = of_property_read_u32_index(np, "st,syscfg",
+ MAJOR_ID_INDEX, &major_offset);
+ if (ret) {
+ dev_err(dev, "No major number offset provided in %pOF [%d]\n",
+ np, ret);
+ return ret;
+ }
+
+ ret = regmap_read(ddata.syscfg, major_offset, &socid);
+ if (ret) {
+ dev_err(dev, "Failed to read major number from syscon [%d]\n",
+ ret);
+ return ret;
+ }
+
+ return ((socid >> VERSION_SHIFT) & 0xf) + 1;
+}
+
+static int sti_cpufreq_fetch_minor(void)
+{
+ struct device *dev = ddata.cpu;
+ struct device_node *np = dev->of_node;
+ unsigned int minor_offset;
+ unsigned int minid;
+ int ret;
+
+ ret = of_property_read_u32_index(np, "st,syscfg-eng",
+ MINOR_ID_INDEX, &minor_offset);
+ if (ret) {
+ dev_err(dev,
+ "No minor number offset provided %pOF [%d]\n",
+ np, ret);
+ return ret;
+ }
+
+ ret = regmap_read(ddata.syscfg_eng, minor_offset, &minid);
+ if (ret) {
+ dev_err(dev,
+ "Failed to read the minor number from syscon [%d]\n",
+ ret);
+ return ret;
+ }
+
+ return minid & 0xf;
+}
+
+static int sti_cpufreq_fetch_regmap_field(const struct reg_field *reg_fields,
+ int hw_info_offset, int field)
+{
+ struct regmap_field *regmap_field;
+ struct reg_field reg_field = reg_fields[field];
+ struct device *dev = ddata.cpu;
+ unsigned int value;
+ int ret;
+
+ reg_field.reg = hw_info_offset;
+ regmap_field = devm_regmap_field_alloc(dev,
+ ddata.syscfg_eng,
+ reg_field);
+ if (IS_ERR(regmap_field)) {
+ dev_err(dev, "Failed to allocate reg field\n");
+ return PTR_ERR(regmap_field);
+ }
+
+ ret = regmap_field_read(regmap_field, &value);
+ if (ret) {
+ dev_err(dev, "Failed to read %s code\n",
+ field ? "SUBSTRATE" : "PCODE");
+ return ret;
+ }
+
+ return value;
+}
+
+static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
+ [PCODE] = REG_FIELD(0, 16, 19),
+ [SUBSTRATE] = REG_FIELD(0, 0, 2),
+};
+
+static const struct reg_field *sti_cpufreq_match(void)
+{
+ if (of_machine_is_compatible("st,stih407") ||
+ of_machine_is_compatible("st,stih410") ||
+ of_machine_is_compatible("st,stih418"))
+ return sti_stih407_dvfs_regfields;
+
+ return NULL;
+}
+
+static int sti_cpufreq_set_opp_info(void)
+{
+ struct device *dev = ddata.cpu;
+ struct device_node *np = dev->of_node;
+ const struct reg_field *reg_fields;
+ unsigned int hw_info_offset;
+ unsigned int version[VERSION_ELEMENTS];
+ int pcode, substrate, major, minor;
+ int opp_token, ret;
+ char name[MAX_PCODE_NAME_LEN];
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ .prop_name = name,
+ };
+
+ reg_fields = sti_cpufreq_match();
+ if (!reg_fields) {
+ dev_err(dev, "This SoC doesn't support voltage scaling\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32_index(np, "st,syscfg-eng",
+ HW_INFO_INDEX, &hw_info_offset);
+ if (ret) {
+ dev_warn(dev, "Failed to read HW info offset from DT\n");
+ substrate = DEFAULT_VERSION;
+ pcode = 0;
+ goto use_defaults;
+ }
+
+ pcode = sti_cpufreq_fetch_regmap_field(reg_fields,
+ hw_info_offset,
+ PCODE);
+ if (pcode < 0) {
+ dev_warn(dev, "Failed to obtain process code\n");
+ /* Use default pcode */
+ pcode = 0;
+ }
+
+ substrate = sti_cpufreq_fetch_regmap_field(reg_fields,
+ hw_info_offset,
+ SUBSTRATE);
+ if (substrate) {
+ dev_warn(dev, "Failed to obtain substrate code\n");
+ /* Use default substrate */
+ substrate = DEFAULT_VERSION;
+ }
+
+use_defaults:
+ major = sti_cpufreq_fetch_major();
+ if (major < 0) {
+ dev_err(dev, "Failed to obtain major version\n");
+ /* Use default major number */
+ major = DEFAULT_VERSION;
+ }
+
+ minor = sti_cpufreq_fetch_minor();
+ if (minor < 0) {
+ dev_err(dev, "Failed to obtain minor version\n");
+ /* Use default minor number */
+ minor = DEFAULT_VERSION;
+ }
+
+ snprintf(name, MAX_PCODE_NAME_LEN, "pcode%d", pcode);
+
+ version[0] = BIT(major);
+ version[1] = BIT(minor);
+ version[2] = BIT(substrate);
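+
+ /*
+ * For example, major = 3 gives version[0] = BIT(3) = 0x8, while a
+ * fallback to DEFAULT_VERSION (31) gives BIT(31) = 0x80000000, the
+ * "suitable for ALL versions" value described at the DEFAULT_VERSION
+ * definition above.
+ */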
+
+ opp_token = dev_pm_opp_set_config(dev, &config);
+ if (opp_token < 0) {
+ dev_err(dev, "Failed to set OPP config\n");
+ return opp_token;
+ }
+
+ dev_dbg(dev, "pcode: %d major: %d minor: %d substrate: %d\n",
+ pcode, major, minor, substrate);
+ dev_dbg(dev, "version[0]: %x version[1]: %x version[2]: %x\n",
+ version[0], version[1], version[2]);
+
+ return 0;
+}
+
+static int sti_cpufreq_fetch_syscon_registers(void)
+{
+ struct device *dev = ddata.cpu;
+ struct device_node *np = dev->of_node;
+
+ ddata.syscfg = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(ddata.syscfg)) {
+ dev_err(dev, "\"st,syscfg\" not supplied\n");
+ return PTR_ERR(ddata.syscfg);
+ }
+
+ ddata.syscfg_eng = syscon_regmap_lookup_by_phandle(np, "st,syscfg-eng");
+ if (IS_ERR(ddata.syscfg_eng)) {
+ dev_err(dev, "\"st,syscfg-eng\" not supplied\n");
+ return PTR_ERR(ddata.syscfg_eng);
+ }
+
+ return 0;
+}
+
+static int __init sti_cpufreq_init(void)
+{
+ int ret;
+
+ if ((!of_machine_is_compatible("st,stih407")) &&
+ (!of_machine_is_compatible("st,stih410")) &&
+ (!of_machine_is_compatible("st,stih418")))
+ return -ENODEV;
+
+ ddata.cpu = get_cpu_device(0);
+ if (!ddata.cpu) {
+ dev_err(ddata.cpu, "Failed to get device for CPU0\n");
+ goto skip_voltage_scaling;
+ }
+
+ if (!of_get_property(ddata.cpu->of_node, "operating-points-v2", NULL)) {
+ dev_err(ddata.cpu, "OPP-v2 not supported\n");
+ goto skip_voltage_scaling;
+ }
+
+ ret = sti_cpufreq_fetch_syscon_registers();
+ if (ret)
+ goto skip_voltage_scaling;
+
+ ret = sti_cpufreq_set_opp_info();
+ if (!ret)
+ goto register_cpufreq_dt;
+
+skip_voltage_scaling:
+ dev_err(ddata.cpu, "Not doing voltage scaling\n");
+
+register_cpufreq_dt:
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return 0;
+}
+module_init(sti_cpufreq_init);
+
+static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
+ { .compatible = "st,stih407" },
+ { .compatible = "st,stih410" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
+
+MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
+MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@st.com>");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
new file mode 100644
index 000000000..1583a370d
--- /dev/null
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner CPUFreq nvmem based driver
+ *
+ * The sun50i-cpufreq-nvmem driver reads the efuse value from the SoC to
+ * provide the OPP framework with required information.
+ *
+ * Copyright (C) 2019 Yangtao Li <tiny.windzz@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define MAX_NAME_LEN 7
+
+#define NVMEM_MASK 0x7
+#define NVMEM_SHIFT 5
+
+static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
+
+/**
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
+ * @versions: Set to the value parsed from efuse
+ *
+ * Returns 0 on success.
+ */
+static int sun50i_cpufreq_get_efuse(u32 *versions)
+{
+ struct nvmem_cell *speedbin_nvmem;
+ struct device_node *np;
+ struct device *cpu_dev;
+ u32 *speedbin, efuse_value;
+ size_t len;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!np)
+ return -ENOENT;
+
+ ret = of_device_is_compatible(np,
+ "allwinner,sun50i-h6-operating-points");
+ if (!ret) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ of_node_put(np);
+ if (IS_ERR(speedbin_nvmem))
+ return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
+ "Could not get nvmem cell\n");
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ nvmem_cell_put(speedbin_nvmem);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
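+ /*
+ * For example, a raw fuse word of 0x60 decodes to
+ * (0x60 >> 5) & 0x7 = 3, so *versions becomes 2 and the probe
+ * routine below selects the "speed2" OPP properties.
+ */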
+ if (efuse_value >= 1 && efuse_value <= 3)
+ *versions = efuse_value - 1;
+ else
+ *versions = 0;
+
+ kfree(speedbin);
+ return 0;
+};
+
+static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
+{
+ int *opp_tokens;
+ char name[MAX_NAME_LEN];
+ unsigned int cpu;
+ u32 speed = 0;
+ int ret;
+
+ opp_tokens = kcalloc(num_possible_cpus(), sizeof(*opp_tokens),
+ GFP_KERNEL);
+ if (!opp_tokens)
+ return -ENOMEM;
+
+ ret = sun50i_cpufreq_get_efuse(&speed);
+ if (ret) {
+ kfree(opp_tokens);
+ return ret;
+ }
+
+ snprintf(name, MAX_NAME_LEN, "speed%d", speed);
+
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev) {
+ ret = -ENODEV;
+ goto free_opp;
+ }
+
+ opp_tokens[cpu] = dev_pm_opp_set_prop_name(cpu_dev, name);
+ if (opp_tokens[cpu] < 0) {
+ ret = opp_tokens[cpu];
+ pr_err("Failed to set prop name\n");
+ goto free_opp;
+ }
+ }
+
+ cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ if (!IS_ERR(cpufreq_dt_pdev)) {
+ platform_set_drvdata(pdev, opp_tokens);
+ return 0;
+ }
+
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ pr_err("Failed to register platform device\n");
+
+free_opp:
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+ kfree(opp_tokens);
+
+ return ret;
+}
+
+static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
+{
+ int *opp_tokens = platform_get_drvdata(pdev);
+ unsigned int cpu;
+
+ platform_device_unregister(cpufreq_dt_pdev);
+
+ for_each_possible_cpu(cpu)
+ dev_pm_opp_put_prop_name(opp_tokens[cpu]);
+
+ kfree(opp_tokens);
+
+ return 0;
+}
+
+static struct platform_driver sun50i_cpufreq_driver = {
+ .probe = sun50i_cpufreq_nvmem_probe,
+ .remove = sun50i_cpufreq_nvmem_remove,
+ .driver = {
+ .name = "sun50i-cpufreq-nvmem",
+ },
+};
+
+static const struct of_device_id sun50i_cpufreq_match_list[] = {
+ { .compatible = "allwinner,sun50i-h6" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
+
+static const struct of_device_id *sun50i_cpufreq_match_node(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(sun50i_cpufreq_match_list, np);
+ of_node_put(np);
+
+ return match;
+}
+
+/*
+ * Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
+ * all the real activity is done in the probe, which may be deferred as well.
+ * The init here is only registering the driver and the platform device.
+ */
+static int __init sun50i_cpufreq_init(void)
+{
+ const struct of_device_id *match;
+ int ret;
+
+ match = sun50i_cpufreq_match_node();
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&sun50i_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ sun50i_cpufreq_pdev =
+ platform_device_register_simple("sun50i-cpufreq-nvmem",
+ -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(sun50i_cpufreq_pdev);
+ if (ret == 0)
+ return 0;
+
+ platform_driver_unregister(&sun50i_cpufreq_driver);
+ return ret;
+}
+module_init(sun50i_cpufreq_init);
+
+static void __exit sun50i_cpufreq_exit(void)
+{
+ platform_device_unregister(sun50i_cpufreq_pdev);
+ platform_driver_unregister(&sun50i_cpufreq_driver);
+}
+module_exit(sun50i_cpufreq_exit);
+
+MODULE_DESCRIPTION("Sun50i-h6 cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
new file mode 100644
index 000000000..7a1ea6fdc
--- /dev/null
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Tegra 124 cpufreq driver
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+struct tegra124_cpufreq_priv {
+ struct clk *cpu_clk;
+ struct clk *pllp_clk;
+ struct clk *pllx_clk;
+ struct clk *dfll_clk;
+ struct platform_device *cpufreq_dt_pdev;
+};
+
+static int tegra124_cpu_switch_to_dfll(struct tegra124_cpufreq_priv *priv)
+{
+ struct clk *orig_parent;
+ int ret;
+
+ ret = clk_set_rate(priv->dfll_clk, clk_get_rate(priv->cpu_clk));
+ if (ret)
+ return ret;
+
+ orig_parent = clk_get_parent(priv->cpu_clk);
+ clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+
+ ret = clk_prepare_enable(priv->dfll_clk);
+ if (ret)
+ goto out;
+
+ clk_set_parent(priv->cpu_clk, priv->dfll_clk);
+
+ return 0;
+
+out:
+ clk_set_parent(priv->cpu_clk, orig_parent);
+
+ return ret;
+}
+
+static int tegra124_cpufreq_probe(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv;
+ struct device_node *np;
+ struct device *cpu_dev;
+ struct platform_device_info cpufreq_dt_devinfo = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_cpu_device_node_get(0);
+ if (!np)
+ return -ENODEV;
+
+ priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
+ if (IS_ERR(priv->cpu_clk)) {
+ ret = PTR_ERR(priv->cpu_clk);
+ goto out_put_np;
+ }
+
+ priv->dfll_clk = of_clk_get_by_name(np, "dfll");
+ if (IS_ERR(priv->dfll_clk)) {
+ ret = PTR_ERR(priv->dfll_clk);
+ goto out_put_cpu_clk;
+ }
+
+ priv->pllx_clk = of_clk_get_by_name(np, "pll_x");
+ if (IS_ERR(priv->pllx_clk)) {
+ ret = PTR_ERR(priv->pllx_clk);
+ goto out_put_dfll_clk;
+ }
+
+ priv->pllp_clk = of_clk_get_by_name(np, "pll_p");
+ if (IS_ERR(priv->pllp_clk)) {
+ ret = PTR_ERR(priv->pllp_clk);
+ goto out_put_pllx_clk;
+ }
+
+ ret = tegra124_cpu_switch_to_dfll(priv);
+ if (ret)
+ goto out_put_pllp_clk;
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = &pdev->dev;
+
+ priv->cpufreq_dt_pdev =
+ platform_device_register_full(&cpufreq_dt_devinfo);
+ if (IS_ERR(priv->cpufreq_dt_pdev)) {
+ ret = PTR_ERR(priv->cpufreq_dt_pdev);
+ goto out_put_pllp_clk;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ of_node_put(np);
+
+ return 0;
+
+out_put_pllp_clk:
+ clk_put(priv->pllp_clk);
+out_put_pllx_clk:
+ clk_put(priv->pllx_clk);
+out_put_dfll_clk:
+ clk_put(priv->dfll_clk);
+out_put_cpu_clk:
+ clk_put(priv->cpu_clk);
+out_put_np:
+ of_node_put(np);
+
+ return ret;
+}
+
+static int __maybe_unused tegra124_cpufreq_suspend(struct device *dev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev);
+ int err;
+
+ /*
+ * PLLP rate 408 MHz is below the CPU Fmax at Vmin and is safe to
+ * use during suspend and resume. So, switch the CPU clock source
+ * to PLLP and disable DFLL.
+ */
+ err = clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to reparent to PLLP: %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(priv->dfll_clk);
+
+ return 0;
+}
+
+static int __maybe_unused tegra124_cpufreq_resume(struct device *dev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(dev);
+ int err;
+
+ /*
+ * Warmboot code powers up the CPU with PLLP clock source.
+ * Enable DFLL clock and switch CPU clock source back to DFLL.
+ */
+ err = clk_prepare_enable(priv->dfll_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable DFLL clock for CPU: %d\n", err);
+ goto disable_cpufreq;
+ }
+
+ err = clk_set_parent(priv->cpu_clk, priv->dfll_clk);
+ if (err < 0) {
+ dev_err(dev, "failed to reparent to DFLL clock: %d\n", err);
+ goto disable_dfll;
+ }
+
+ return 0;
+
+disable_dfll:
+ clk_disable_unprepare(priv->dfll_clk);
+disable_cpufreq:
+ disable_cpufreq();
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
+ tegra124_cpufreq_resume)
+};
+
+static struct platform_driver tegra124_cpufreq_platdrv = {
+ .driver.name = "cpufreq-tegra124",
+ .driver.pm = &tegra124_cpufreq_pm_ops,
+ .probe = tegra124_cpufreq_probe,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ if (!(of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
+ return -ENODEV;
+
+ /*
+ * Platform driver+device required for handling EPROBE_DEFER with
+ * the regulator and the DFLL clock
+ */
+ ret = platform_driver_register(&tegra124_cpufreq_platdrv);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+module_init(tegra_cpufreq_init);
+
+MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
+MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
new file mode 100644
index 000000000..6c88827f4
--- /dev/null
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+#define TEGRA186_NUM_CLUSTERS 2
+#define EDVD_OFFSET_A57(core) ((SZ_64K * 6) + (0x20 + (core) * 0x4))
+#define EDVD_OFFSET_DENVER(core) ((SZ_64K * 7) + (0x20 + (core) * 0x4))
+#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
+#define EDVD_CORE_VOLT_FREQ_F_MASK 0xffff
+#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16
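+
+/*
+ * Illustration of the layout defined above: the EDVD register for A57
+ * core 2 sits at (6 * SZ_64K) + 0x20 + (2 * 0x4) = 0x60028, and each
+ * register packs the frequency request (ndiv) in bits [15:0] and the
+ * voltage index at shift 16, per the EDVD_CORE_VOLT_FREQ_* definitions.
+ */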
+
+struct tegra186_cpufreq_cpu {
+ unsigned int bpmp_cluster_id;
+ unsigned int edvd_offset;
+};
+
+static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
+ /* CPU0 - A57 Cluster */
+ {
+ .bpmp_cluster_id = 1,
+ .edvd_offset = EDVD_OFFSET_A57(0)
+ },
+ /* CPU1 - Denver Cluster */
+ {
+ .bpmp_cluster_id = 0,
+ .edvd_offset = EDVD_OFFSET_DENVER(0)
+ },
+ /* CPU2 - Denver Cluster */
+ {
+ .bpmp_cluster_id = 0,
+ .edvd_offset = EDVD_OFFSET_DENVER(1)
+ },
+ /* CPU3 - A57 Cluster */
+ {
+ .bpmp_cluster_id = 1,
+ .edvd_offset = EDVD_OFFSET_A57(1)
+ },
+ /* CPU4 - A57 Cluster */
+ {
+ .bpmp_cluster_id = 1,
+ .edvd_offset = EDVD_OFFSET_A57(2)
+ },
+ /* CPU5 - A57 Cluster */
+ {
+ .bpmp_cluster_id = 1,
+ .edvd_offset = EDVD_OFFSET_A57(3)
+ },
+};
+
+struct tegra186_cpufreq_cluster {
+ struct cpufreq_frequency_table *table;
+ u32 ref_clk_khz;
+ u32 div;
+};
+
+struct tegra186_cpufreq_data {
+ void __iomem *regs;
+ struct tegra186_cpufreq_cluster *clusters;
+ const struct tegra186_cpufreq_cpu *cpus;
+};
+
+static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+
+ policy->freq_table = data->clusters[cluster].table;
+ policy->cpuinfo.transition_latency = 300 * 1000;
+ policy->driver_data = NULL;
+
+ return 0;
+}
+
+static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+ unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ u32 edvd_val = tbl->driver_data;
+
+ writel(edvd_val, data->regs + edvd_offset);
+
+ return 0;
+}
+
+static unsigned int tegra186_cpufreq_get(unsigned int cpu)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct tegra186_cpufreq_cluster *cluster;
+ struct cpufreq_policy *policy;
+ unsigned int edvd_offset, cluster_id;
+ u32 ndiv;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
+
+ edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
+ cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
+ cluster = &data->clusters[cluster_id];
+ cpufreq_cpu_put(policy);
+
+ return (cluster->ref_clk_khz * ndiv) / cluster->div;
+}
+
+static struct cpufreq_driver tegra186_cpufreq_driver = {
+ .name = "tegra186",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .get = tegra186_cpufreq_get,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = tegra186_cpufreq_set_target,
+ .init = tegra186_cpufreq_init,
+ .attr = cpufreq_generic_attr,
+};
+
+static struct cpufreq_frequency_table *init_vhint_table(
+ struct platform_device *pdev, struct tegra_bpmp *bpmp,
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+{
+ struct cpufreq_frequency_table *table;
+ struct mrq_cpu_vhint_request req;
+ struct tegra_bpmp_message msg;
+ struct cpu_vhint_data *data;
+ int err, i, j, num_rates = 0;
+ dma_addr_t phys;
+ void *virt;
+
+ virt = dma_alloc_coherent(bpmp->dev, sizeof(*data), &phys,
+ GFP_KERNEL);
+ if (!virt)
+ return ERR_PTR(-ENOMEM);
+
+ data = (struct cpu_vhint_data *)virt;
+
+ memset(&req, 0, sizeof(req));
+ req.addr = phys;
+ req.cluster_id = cluster_id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_CPU_VHINT;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err) {
+ table = ERR_PTR(err);
+ goto free;
+ }
+ if (msg.rx.ret) {
+ table = ERR_PTR(-EINVAL);
+ goto free;
+ }
+
+ for (i = data->vfloor; i <= data->vceil; i++) {
+ u16 ndiv = data->ndiv[i];
+
+ if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
+ continue;
+
+ /* Only store lowest voltage index for each rate */
+ if (i > 0 && ndiv == data->ndiv[i - 1])
+ continue;
+
+ num_rates++;
+ }
+
+ table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ GFP_KERNEL);
+ if (!table) {
+ table = ERR_PTR(-ENOMEM);
+ goto free;
+ }
+
+ cluster->ref_clk_khz = data->ref_clk_hz / 1000;
+ cluster->div = data->pdiv * data->mdiv;
+
+ for (i = data->vfloor, j = 0; i <= data->vceil; i++) {
+ struct cpufreq_frequency_table *point;
+ u16 ndiv = data->ndiv[i];
+ u32 edvd_val = 0;
+
+ if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
+ continue;
+
+ /* Only store lowest voltage index for each rate */
+ if (i > 0 && ndiv == data->ndiv[i - 1])
+ continue;
+
+ edvd_val |= i << EDVD_CORE_VOLT_FREQ_V_SHIFT;
+ edvd_val |= ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT;
+
+ point = &table[j++];
+ point->driver_data = edvd_val;
+ point->frequency = (cluster->ref_clk_khz * ndiv) / cluster->div;
+ }
+
+ table[j].frequency = CPUFREQ_TABLE_END;
+
+free:
+ dma_free_coherent(bpmp->dev, sizeof(*data), virt, phys);
+
+ return table;
+}
+
+static int tegra186_cpufreq_probe(struct platform_device *pdev)
+{
+ struct tegra186_cpufreq_data *data;
+ struct tegra_bpmp *bpmp;
+ unsigned int i = 0, err;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clusters = devm_kcalloc(&pdev->dev, TEGRA186_NUM_CLUSTERS,
+ sizeof(*data->clusters), GFP_KERNEL);
+ if (!data->clusters)
+ return -ENOMEM;
+
+ data->cpus = tegra186_cpus;
+
+ bpmp = tegra_bpmp_get(&pdev->dev);
+ if (IS_ERR(bpmp))
+ return PTR_ERR(bpmp);
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs)) {
+ err = PTR_ERR(data->regs);
+ goto put_bpmp;
+ }
+
+ for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
+ struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
+
+ cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
+ if (IS_ERR(cluster->table)) {
+ err = PTR_ERR(cluster->table);
+ goto put_bpmp;
+ }
+ }
+
+ tegra186_cpufreq_driver.driver_data = data;
+
+ err = cpufreq_register_driver(&tegra186_cpufreq_driver);
+
+put_bpmp:
+ tegra_bpmp_put(bpmp);
+
+ return err;
+}
+
+static int tegra186_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&tegra186_cpufreq_driver);
+
+ return 0;
+}
+
+static const struct of_device_id tegra186_cpufreq_of_match[] = {
+ { .compatible = "nvidia,tegra186-ccplex-cluster", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra186_cpufreq_of_match);
+
+static struct platform_driver tegra186_cpufreq_platform_driver = {
+ .driver = {
+ .name = "tegra186-cpufreq",
+ .of_match_table = tegra186_cpufreq_of_match,
+ },
+ .probe = tegra186_cpufreq_probe,
+ .remove = tegra186_cpufreq_remove,
+};
+module_platform_driver(tegra186_cpufreq_platform_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra186 cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
new file mode 100644
index 000000000..4596c3e32
--- /dev/null
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/smp_plat.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+#define KHZ 1000
+#define REF_CLK_MHZ 408 /* 408 MHz */
+#define US_DELAY 500
+#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
+#define MAX_CNT ~0U
+
+#define NDIV_MASK 0x1FF
+
+#define CORE_OFFSET(cpu) (cpu * 8)
+#define CMU_CLKS_BASE 0x2000
+#define SCRATCH_FREQ_CORE_REG(data, cpu) (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))
+
+#define MMCRAB_CLUSTER_BASE(cl) (0x30000 + (cl * 0x10000))
+#define CLUSTER_ACTMON_BASE(data, cl) \
+ (data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base))
+#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
+
+/* cpufreq transition latency */
+#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
+
+struct tegra_cpu_ctr {
+ u32 cpu;
+ u32 coreclk_cnt, last_coreclk_cnt;
+ u32 refclk_cnt, last_refclk_cnt;
+};
+
+struct read_counters_work {
+ struct work_struct work;
+ struct tegra_cpu_ctr c;
+};
+
+struct tegra_cpufreq_ops {
+ void (*read_counters)(struct tegra_cpu_ctr *c);
+ void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
+ void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
+ int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
+};
+
+struct tegra_cpufreq_soc {
+ struct tegra_cpufreq_ops *ops;
+ int maxcpus_per_cluster;
+ unsigned int num_clusters;
+ phys_addr_t actmon_cntr_base;
+};
+
+struct tegra194_cpufreq_data {
+ void __iomem *regs;
+ struct cpufreq_frequency_table **tables;
+ const struct tegra_cpufreq_soc *soc;
+};
+
+static struct workqueue_struct *read_counters_wq;
+
+static void tegra_get_cpu_mpidr(void *mpidr)
+{
+ *((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+}
+
+static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+ u64 mpidr;
+
+ smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+ if (cpuid)
+ *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ if (clusterid)
+ *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+}
+
+static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *freq_core_reg;
+ u64 mpidr_id;
+
+ /* use physical id to get address of per core frequency register */
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+ freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
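+ /*
+ * For example, on Tegra234 (4 cores per cluster) cluster 1 / core 2
+ * maps to mpidr_id 6, i.e. offset CMU_CLKS_BASE + 6 * 8 = 0x2030.
+ */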
+
+ *ndiv = readl(freq_core_reg) & NDIV_MASK;
+
+ return 0;
+}
+
+static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *freq_core_reg;
+ u32 cpu, cpuid, clusterid;
+ u64 mpidr_id;
+
+ for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+ /* use physical id to get address of per core frequency register */
+ mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
+ freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);
+
+ writel(ndiv, freq_core_reg);
+ }
+}
+
+/*
+ * This register provides access to two counter values with a single
+ * 64-bit read. The counter values are used to determine the average
+ * actual frequency a core has run at over a period of time.
+ * [63:32] PLLP counter: Counts at fixed frequency (408 MHz)
+ * [31:0] Core clock counter: Counts on every core clock cycle
+ */
+static void tegra234_read_counters(struct tegra_cpu_ctr *c)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ void __iomem *actmon_reg;
+ u32 cpuid, clusterid;
+ u64 val;
+
+ data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
+ actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);
+
+ val = readq(actmon_reg);
+ c->last_refclk_cnt = upper_32_bits(val);
+ c->last_coreclk_cnt = lower_32_bits(val);
+ udelay(US_DELAY);
+ val = readq(actmon_reg);
+ c->refclk_cnt = upper_32_bits(val);
+ c->coreclk_cnt = lower_32_bits(val);
+}
+
+static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
+ .read_counters = tegra234_read_counters,
+ .get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
+ .get_cpu_ndiv = tegra234_get_cpu_ndiv,
+ .set_cpu_ndiv = tegra234_set_cpu_ndiv,
+};
+
+static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
+ .ops = &tegra234_cpufreq_ops,
+ .actmon_cntr_base = 0x9000,
+ .maxcpus_per_cluster = 4,
+ .num_clusters = 3,
+};
+
+static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
+ .ops = &tegra234_cpufreq_ops,
+ .actmon_cntr_base = 0x4000,
+ .maxcpus_per_cluster = 8,
+ .num_clusters = 1,
+};
+
+static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
+{
+ u64 mpidr;
+
+ smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);
+
+ if (cpuid)
+ *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ if (clusterid)
+ *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
+/*
+ * Read per-core Read-only system register NVFREQ_FEEDBACK_EL1.
+ * The register provides frequency feedback information to
+ * determine the average actual frequency a core has run at over
+ * a period of time.
+ * [31:0] PLLP counter: Counts at fixed frequency (408 MHz)
+ * [63:32] Core clock counter: counts on every core clock cycle
+ * where the core is architecturally clocking
+ */
+static u64 read_freq_feedback(void)
+{
+ u64 val = 0;
+
+ asm volatile("mrs %0, s3_0_c15_c0_5" : "=r" (val) : );
+
+ return val;
+}
+
+static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
+ *nltbl, u16 ndiv)
+{
+ return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
+}
+
+static void tegra194_read_counters(struct tegra_cpu_ctr *c)
+{
+ u64 val;
+
+ val = read_freq_feedback();
+ c->last_refclk_cnt = lower_32_bits(val);
+ c->last_coreclk_cnt = upper_32_bits(val);
+ udelay(US_DELAY);
+ val = read_freq_feedback();
+ c->refclk_cnt = lower_32_bits(val);
+ c->coreclk_cnt = upper_32_bits(val);
+}
+
+static void tegra_read_counters(struct work_struct *work)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ struct read_counters_work *read_counters_work;
+ struct tegra_cpu_ctr *c;
+
+ /*
+ * ref_clk_counter(32 bit counter) runs on constant clk,
+ * pll_p(408MHz).
+ * It will take = 2 ^ 32 / 408 MHz to overflow ref clk counter
+ * = 10526880 usec = 10.527 sec to overflow
+ *
+ * Likewise, core_clk_counter(32 bit counter) runs on the core clock.
+ * It's synchronized to crab_clk (cpu_crab_clk) which runs at
+ * freq of cluster. Assuming max cluster clock ~2000MHz,
+ * It will take = 2 ^ 32 / 2000 MHz to overflow core clk counter
+ * = ~2.147 sec to overflow
+ */
+ read_counters_work = container_of(work, struct read_counters_work,
+ work);
+ c = &read_counters_work->c;
+
+ data->soc->ops->read_counters(c);
+}
+
+/*
+ * Return instantaneous cpu speed
+ * Instantaneous freq is calculated as -
+ * - Take a sample on every query of the frequency
+ * - Read core and ref clock counters
+ * - Delay for X us
+ * - Read the above cycle counters again
+ * - Calculate freq from the delta of the core clock counter divided by
+ *   the delta time, or equivalently by the delta of ref_clk_counter
+ * - Return Kcycles/second, i.e. freq in KHz
+ *
+ * delta time period = x sec
+ *                   = delta ref_clk_counter / (408 * 10^6) sec
+ * freq in Hz  = cycles/sec
+ *             = delta cycles / x sec
+ *             = (delta cycles * 408 * 10^6) / delta ref_clk_counter
+ * freq in KHz = (delta cycles * 408 * 10^3) / delta ref_clk_counter
+ *
+ * @cpu - logical cpu whose freq is to be read
+ * Returns freq in KHz on success, 0 if cpu is offline
+ */
+static unsigned int tegra194_calculate_speed(u32 cpu)
+{
+ struct read_counters_work read_counters_work;
+ struct tegra_cpu_ctr c;
+ u32 delta_refcnt;
+ u32 delta_ccnt;
+ u32 rate_mhz;
+
+	/*
+	 * udelay() is required to reconstruct cpu frequency over an
+	 * observation window. A workqueue is used so that udelay() runs
+	 * with interrupts enabled on the target cpu.
+	 */
+ read_counters_work.c.cpu = cpu;
+ INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
+ queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
+ flush_work(&read_counters_work.work);
+ c = read_counters_work.c;
+
+ if (c.coreclk_cnt < c.last_coreclk_cnt)
+ delta_ccnt = c.coreclk_cnt + (MAX_CNT - c.last_coreclk_cnt);
+ else
+ delta_ccnt = c.coreclk_cnt - c.last_coreclk_cnt;
+ if (!delta_ccnt)
+ return 0;
+
+ /* ref clock is 32 bits */
+ if (c.refclk_cnt < c.last_refclk_cnt)
+ delta_refcnt = c.refclk_cnt + (MAX_CNT - c.last_refclk_cnt);
+ else
+ delta_refcnt = c.refclk_cnt - c.last_refclk_cnt;
+ if (!delta_refcnt) {
+ pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
+ return 0;
+ }
+ rate_mhz = ((unsigned long)(delta_ccnt * REF_CLK_MHZ)) / delta_refcnt;
+
+ return (rate_mhz * KHZ); /* in KHz */
+}
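+
+/*
+ * Worked example with hypothetical counter deltas: delta_ccnt = 40000 and
+ * delta_refcnt = 8160 over the observation window give
+ * rate_mhz = 40000 * 408 / 8160 = 2000, so 2000000 kHz is returned.
+ */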
+
+static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
+{
+ u64 ndiv_val;
+
+ asm volatile("mrs %0, s3_0_c15_c0_4" : "=r" (ndiv_val) : );
+
+ *(u64 *)ndiv = ndiv_val;
+}
+
+static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
+{
+	/* Pass the caller's pointer through so the remote read lands in *ndiv */
+	return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, ndiv, true);
+}
+
+static void tegra194_set_cpu_ndiv_sysreg(void *data)
+{
+ u64 ndiv_val = *(u64 *)data;
+
+ asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
+}
+
+static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
+{
+ on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
+}
+
+static unsigned int tegra194_get_speed(u32 cpu)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *pos;
+ u32 cpuid, clusterid;
+ unsigned int rate;
+ u64 ndiv;
+ int ret;
+
+ data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);
+
+ /* reconstruct actual cpu freq using counters */
+ rate = tegra194_calculate_speed(cpu);
+
+ /* get last written ndiv value */
+ ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
+ if (WARN_ON_ONCE(ret))
+ return rate;
+
+	/*
+	 * If the reconstructed frequency is within an acceptable delta of
+	 * the last written value, return the frequency corresponding to
+	 * the last written ndiv value from freq_table. This is done to
+	 * return a consistent value.
+	 */
+ cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
+ if (pos->driver_data != ndiv)
+ continue;
+
+ if (abs(pos->frequency - rate) > 115200) {
+ pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
+ cpu, rate, pos->frequency, ndiv);
+ } else {
+ rate = pos->frequency;
+ }
+ break;
+ }
+ return rate;
+}
+
+static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+ int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
+ u32 start_cpu, cpu;
+ u32 clusterid;
+
+ data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
+
+ if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
+ return -EINVAL;
+
+ start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
+ /* set same policy for all cpus in a cluster */
+ for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
+ if (cpu_possible(cpu))
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+ policy->freq_table = data->tables[clusterid];
+ policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;
+
+ return 0;
+}
+
+static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct cpufreq_frequency_table *tbl = policy->freq_table + index;
+ struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
+
+	/*
+	 * Each core writes its frequency request to a per-core register. All
+	 * cores in a cluster then run at the same frequency, which is the
+	 * maximum of the values requested by the cores in that cluster.
+	 */
+ data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);
+
+ return 0;
+}
+
+static struct cpufreq_driver tegra194_cpufreq_driver = {
+ .name = "tegra194",
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = tegra194_cpufreq_set_target,
+ .get = tegra194_get_speed,
+ .init = tegra194_cpufreq_init,
+ .attr = cpufreq_generic_attr,
+};
+
+static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
+ .read_counters = tegra194_read_counters,
+ .get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
+ .get_cpu_ndiv = tegra194_get_cpu_ndiv,
+ .set_cpu_ndiv = tegra194_set_cpu_ndiv,
+};
+
+static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
+ .ops = &tegra194_cpufreq_ops,
+ .maxcpus_per_cluster = 2,
+ .num_clusters = 4,
+};
+
+static void tegra194_cpufreq_free_resources(void)
+{
+ destroy_workqueue(read_counters_wq);
+}
+
+static struct cpufreq_frequency_table *
+init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
+ unsigned int cluster_id)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct mrq_cpu_ndiv_limits_response resp;
+ unsigned int num_freqs, ndiv, delta_ndiv;
+ struct mrq_cpu_ndiv_limits_request req;
+ struct tegra_bpmp_message msg;
+ u16 freq_table_step_size;
+ int err, index;
+
+ memset(&req, 0, sizeof(req));
+ req.cluster_id = cluster_id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_CPU_NDIV_LIMITS;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return ERR_PTR(err);
+ if (msg.rx.ret == -BPMP_EINVAL) {
+ /* Cluster not available */
+ return NULL;
+ }
+ if (msg.rx.ret)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Make sure frequency table step is a multiple of mdiv to match
+ * vhint table granularity.
+ */
+ freq_table_step_size = resp.mdiv *
+ DIV_ROUND_UP(CPUFREQ_TBL_STEP_HZ, resp.ref_clk_hz);
+
+ dev_dbg(&pdev->dev, "cluster %d: frequency table step size: %d\n",
+ cluster_id, freq_table_step_size);
+
+ delta_ndiv = resp.ndiv_max - resp.ndiv_min;
+
+ if (unlikely(delta_ndiv == 0)) {
+ num_freqs = 1;
+ } else {
+ /* We store both ndiv_min and ndiv_max hence the +1 */
+ num_freqs = delta_ndiv / freq_table_step_size + 1;
+ }
+
+ num_freqs += (delta_ndiv % freq_table_step_size) ? 1 : 0;
+
+ freq_table = devm_kcalloc(&pdev->dev, num_freqs + 1,
+ sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (index = 0, ndiv = resp.ndiv_min;
+ ndiv < resp.ndiv_max;
+ index++, ndiv += freq_table_step_size) {
+ freq_table[index].driver_data = ndiv;
+ freq_table[index].frequency = map_ndiv_to_freq(&resp, ndiv);
+ }
+
+ freq_table[index].driver_data = resp.ndiv_max;
+ freq_table[index++].frequency = map_ndiv_to_freq(&resp, resp.ndiv_max);
+ freq_table[index].frequency = CPUFREQ_TABLE_END;
+
+ return freq_table;
+}
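+
+/*
+ * Sizing example with hypothetical BPMP limits: ndiv_min = 20, ndiv_max = 80
+ * and freq_table_step_size = 2 give num_freqs = 60 / 2 + 1 = 31, so 32 entries
+ * are allocated: ndiv 20, 22, ..., 78, then ndiv_max (80), then the
+ * CPUFREQ_TABLE_END terminator.
+ */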
+
+static int tegra194_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct tegra_cpufreq_soc *soc;
+ struct tegra194_cpufreq_data *data;
+ struct tegra_bpmp *bpmp;
+ int err, i;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ soc = of_device_get_match_data(&pdev->dev);
+
+ if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
+ data->soc = soc;
+ } else {
+ dev_err(&pdev->dev, "soc data missing\n");
+ return -EINVAL;
+ }
+
+ data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
+ sizeof(*data->tables), GFP_KERNEL);
+ if (!data->tables)
+ return -ENOMEM;
+
+ if (soc->actmon_cntr_base) {
+		/* mmio registers are used for frequency requests and reconstruction */
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ bpmp = tegra_bpmp_get(&pdev->dev);
+ if (IS_ERR(bpmp))
+ return PTR_ERR(bpmp);
+
+ read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+ if (!read_counters_wq) {
+ dev_err(&pdev->dev, "fail to create_workqueue\n");
+ err = -EINVAL;
+ goto put_bpmp;
+ }
+
+ for (i = 0; i < data->soc->num_clusters; i++) {
+ data->tables[i] = init_freq_table(pdev, bpmp, i);
+ if (IS_ERR(data->tables[i])) {
+ err = PTR_ERR(data->tables[i]);
+ goto err_free_res;
+ }
+ }
+
+ tegra194_cpufreq_driver.driver_data = data;
+
+ err = cpufreq_register_driver(&tegra194_cpufreq_driver);
+ if (!err)
+ goto put_bpmp;
+
+err_free_res:
+ tegra194_cpufreq_free_resources();
+put_bpmp:
+ tegra_bpmp_put(bpmp);
+ return err;
+}
+
+static int tegra194_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&tegra194_cpufreq_driver);
+ tegra194_cpufreq_free_resources();
+
+ return 0;
+}
+
+static const struct of_device_id tegra194_cpufreq_of_match[] = {
+ { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
+ { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
+ { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);
+
+static struct platform_driver tegra194_ccplex_driver = {
+ .driver = {
+ .name = "tegra194-cpufreq",
+ .of_match_table = tegra194_cpufreq_of_match,
+ },
+ .probe = tegra194_cpufreq_probe,
+ .remove = tegra194_cpufreq_remove,
+};
+module_platform_driver(tegra194_ccplex_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra194 cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
new file mode 100644
index 000000000..dfd2de4f8
--- /dev/null
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ */
+
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+static bool cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_property_present(np, "operating-points-v2"))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
+static void tegra20_cpufreq_put_supported_hw(void *opp_token)
+{
+ dev_pm_opp_put_supported_hw((unsigned long) opp_token);
+}
+
+static void tegra20_cpufreq_dt_unregister(void *cpufreq_dt)
+{
+ platform_device_unregister(cpufreq_dt);
+}
+
+static int tegra20_cpufreq_probe(struct platform_device *pdev)
+{
+ struct platform_device *cpufreq_dt;
+ struct device *cpu_dev;
+ u32 versions[2];
+ int err;
+
+ if (!cpu0_node_has_opp_v2_prop()) {
+ dev_err(&pdev->dev, "operating points not found\n");
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.soc_speedo_id);
+ } else {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.cpu_speedo_id);
+ }
+
+ dev_info(&pdev->dev, "hardware version 0x%x 0x%x\n",
+ versions[0], versions[1]);
+
+ cpu_dev = get_cpu_device(0);
+ if (WARN_ON(!cpu_dev))
+ return -ENODEV;
+
+ err = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
+ return err;
+ }
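+
+	/*
+	 * The OPP core then enables only those OPP table entries whose
+	 * opp-supported-hw cells have bits in common with the versions[]
+	 * values set above (process id and speedo id).
+	 */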
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_put_supported_hw,
+ (void *)((unsigned long) err));
+ if (err)
+ return err;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ err = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to create cpufreq-dt device: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ tegra20_cpufreq_dt_unregister,
+ cpufreq_dt);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct platform_driver tegra20_cpufreq_driver = {
+ .probe = tegra20_cpufreq_probe,
+ .driver = {
+ .name = "tegra20-cpufreq",
+ },
+};
+module_platform_driver(tegra20_cpufreq_driver);
+
+MODULE_ALIAS("platform:tegra20-cpufreq");
+MODULE_AUTHOR("Colin Cross <ccross@android.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
new file mode 100644
index 000000000..f64180dd2
--- /dev/null
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI CPUFreq/OPP hw-supported driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define REVISION_MASK 0xF
+#define REVISION_SHIFT 28
+
+#define AM33XX_800M_ARM_MPU_MAX_FREQ 0x1E2F
+#define AM43XX_600M_ARM_MPU_MAX_FREQ 0xFFA
+
+#define DRA7_EFUSE_HAS_OD_MPU_OPP 11
+#define DRA7_EFUSE_HAS_HIGH_MPU_OPP 15
+#define DRA76_EFUSE_HAS_PLUS_MPU_OPP 18
+#define DRA7_EFUSE_HAS_ALL_MPU_OPP 23
+#define DRA76_EFUSE_HAS_ALL_MPU_OPP 24
+
+#define DRA7_EFUSE_NOM_MPU_OPP BIT(0)
+#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
+#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define DRA76_EFUSE_PLUS_MPU_OPP BIT(3)
+
+#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
+#define OMAP3_CONTROL_IDCODE 0x4830A204
+#define OMAP34xx_ProdID_SKUID 0x4830A20C
+#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+
+#define VERSION_COUNT 2
+
+struct ti_cpufreq_data;
+
+struct ti_cpufreq_soc_data {
+ const char * const *reg_names;
+ unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse);
+ unsigned long efuse_fallback;
+ unsigned long efuse_offset;
+ unsigned long efuse_mask;
+ unsigned long efuse_shift;
+ unsigned long rev_offset;
+ bool multi_regulator;
+};
+
+struct ti_cpufreq_data {
+ struct device *cpu_dev;
+ struct device_node *opp_node;
+ struct regmap *syscon;
+ const struct ti_cpufreq_soc_data *soc_data;
+};
+
+static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ if (!efuse)
+ efuse = opp_data->soc_data->efuse_fallback;
+ /* AM335x and AM437x use "OPP disable" bits, so invert */
+ return ~efuse;
+}
+
+static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ unsigned long calculated_efuse = DRA7_EFUSE_NOM_MPU_OPP;
+
+ /*
+ * The efuse on dra7 and am57 parts contains a specific
+ * value indicating the highest available OPP.
+ */
+
+ switch (efuse) {
+ case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
+ case DRA76_EFUSE_HAS_ALL_MPU_OPP:
+ calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
+ fallthrough;
+ case DRA7_EFUSE_HAS_ALL_MPU_OPP:
+ case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
+ calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
+ fallthrough;
+ case DRA7_EFUSE_HAS_OD_MPU_OPP:
+ calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP;
+ }
+
+ return calculated_efuse;
+}
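+
+/*
+ * For example, an efuse value of DRA7_EFUSE_HAS_ALL_MPU_OPP (23) also falls
+ * through to set the OD bit, yielding NOM | HIGH | OD = 0x7, while
+ * DRA76_EFUSE_HAS_ALL_MPU_OPP (24) yields NOM | PLUS | HIGH | OD = 0xf.
+ */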
+
+static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ /* OPP enable bit ("Speed Binned") */
+ return BIT(efuse);
+}
+
+static struct ti_cpufreq_soc_data am3x_soc_data = {
+ .efuse_xlate = amx3_efuse_xlate,
+ .efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
+ .efuse_offset = 0x07fc,
+ .efuse_mask = 0x1fff,
+ .rev_offset = 0x600,
+ .multi_regulator = false,
+};
+
+static struct ti_cpufreq_soc_data am4x_soc_data = {
+ .efuse_xlate = amx3_efuse_xlate,
+ .efuse_fallback = AM43XX_600M_ARM_MPU_MAX_FREQ,
+ .efuse_offset = 0x0610,
+ .efuse_mask = 0x3f,
+ .rev_offset = 0x600,
+ .multi_regulator = false,
+};
+
+static struct ti_cpufreq_soc_data dra7_soc_data = {
+ .efuse_xlate = dra7_efuse_xlate,
+ .efuse_offset = 0x020c,
+ .efuse_mask = 0xf80000,
+ .efuse_shift = 19,
+ .rev_offset = 0x204,
+ .multi_regulator = true,
+};
+
+/*
+ * OMAP35x TRM (SPRUF98K):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * Control OMAP Status Register 15:0 (Address 0x4800 244C) is used
+ * to distinguish between omap3503, omap3515, omap3525, omap3530
+ * and feature presence.
+ * There are encodings for versions limited to 400/266MHz
+ * but we ignore them.
+ * It is not clear if this also holds for omap34xx.
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP1_VDD1,
+ * are stored in the SYSCON register range.
+ * Register 0x4830A20C [ProdID.SKUID] bits [0:3]:
+ * 0x0 for a normal 600/430MHz device,
+ * 0x8 for a 720/520MHz device.
+ * It is not clear what the omap34xx value is.
+ */
+
+static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE,
+ .efuse_shift = 3,
+ .efuse_mask = BIT(3),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+/*
+ * AM/DM37x TRM (SPRUGN4M)
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * Control Device Status Register 15:0 (Address 0x4800 244C) is used
+ * to distinguish between am3703, am3715, dm3725, dm3730
+ * and feature presence.
+ * Speed Binned = Bit 9
+ * 0 800/600 MHz
+ * 1 1000/800 MHz
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP 1G_VDD1,
+ * are stored in the SYSCON register range.
+ * The 0x4830A20C [ProdID.SKUID] register exists but
+ * seems to always read as 0.
+ */
+
+static const char * const omap3_reg_names[] = {"cpu0", "vbb", NULL};
+
+static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+ .reg_names = omap3_reg_names,
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 9,
+ .efuse_mask = BIT(9),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = true,
+};
+
+/*
+ * AM3517 is quite similar to AM/DM37x except that it has no
+ * high speed grade eFuse and no ABB LDO.
+ */
+
+static struct ti_cpufreq_soc_data am3517_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 0,
+ .efuse_mask = 0,
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+
+/**
+ * ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @efuse_value: Set to the value parsed from efuse
+ *
+ * Returns error code if efuse not read properly.
+ */
+static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
+ u32 *efuse_value)
+{
+ struct device *dev = opp_data->cpu_dev;
+ u32 efuse;
+ int ret;
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
+ &efuse);
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->efuse_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ efuse = readl(regs);
+ iounmap(regs);
+	} else if (ret) {
+ dev_err(dev,
+ "Failed to read the efuse value from syscon: %d\n",
+ ret);
+ return ret;
+ }
+
+ efuse = (efuse & opp_data->soc_data->efuse_mask);
+ efuse >>= opp_data->soc_data->efuse_shift;
+
+ *efuse_value = opp_data->soc_data->efuse_xlate(opp_data, efuse);
+
+ return 0;
+}
+
+/**
+ * ti_cpufreq_get_rev() - Parse and return rev value present on SoC
+ * @opp_data: pointer to ti_cpufreq_data context
+ * @revision_value: Set to the value parsed from revision register
+ *
+ * Returns error code if revision not read properly.
+ */
+static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
+ u32 *revision_value)
+{
+ struct device *dev = opp_data->cpu_dev;
+ u32 revision;
+ int ret;
+
+ ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
+ &revision);
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->rev_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ revision = readl(regs);
+ iounmap(regs);
+	} else if (ret) {
+ dev_err(dev,
+ "Failed to read the revision number from syscon: %d\n",
+ ret);
+ return ret;
+ }
+
+ *revision_value = BIT((revision >> REVISION_SHIFT) & REVISION_MASK);
+
+ return 0;
+}
+
+static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
+{
+ struct device *dev = opp_data->cpu_dev;
+ struct device_node *np = opp_data->opp_node;
+
+ opp_data->syscon = syscon_regmap_lookup_by_phandle(np,
+ "syscon");
+ if (IS_ERR(opp_data->syscon)) {
+ dev_err(dev,
+ "\"syscon\" is missing, cannot use OPPv2 table.\n");
+ return PTR_ERR(opp_data->syscon);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ti_cpufreq_of_match[] = {
+ { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+ { .compatible = "ti,am3517", .data = &am3517_soc_data, },
+ { .compatible = "ti,am43", .data = &am4x_soc_data, },
+ { .compatible = "ti,dra7", .data = &dra7_soc_data },
+ { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ /* legacy */
+ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
+ {},
+};
+
+static const struct of_device_id *ti_cpufreq_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+
+ np = of_find_node_by_path("/");
+ match = of_match_node(ti_cpufreq_of_match, np);
+ of_node_put(np);
+
+ return match;
+}
+
+static int ti_cpufreq_probe(struct platform_device *pdev)
+{
+ u32 version[VERSION_COUNT];
+ const struct of_device_id *match;
+ struct ti_cpufreq_data *opp_data;
+ const char * const default_reg_names[] = {"vdd", "vbb", NULL};
+ int ret;
+ struct dev_pm_opp_config config = {
+ .supported_hw = version,
+ .supported_hw_count = ARRAY_SIZE(version),
+ };
+
+ match = dev_get_platdata(&pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ opp_data = devm_kzalloc(&pdev->dev, sizeof(*opp_data), GFP_KERNEL);
+ if (!opp_data)
+ return -ENOMEM;
+
+ opp_data->soc_data = match->data;
+
+ opp_data->cpu_dev = get_cpu_device(0);
+ if (!opp_data->cpu_dev) {
+ pr_err("%s: Failed to get device for CPU0\n", __func__);
+ return -ENODEV;
+ }
+
+ opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
+ if (!opp_data->opp_node) {
+ dev_info(opp_data->cpu_dev,
+ "OPP-v2 not supported, cpufreq-dt will attempt to use legacy tables.\n");
+ goto register_cpufreq_dt;
+ }
+
+ ret = ti_cpufreq_setup_syscon_register(opp_data);
+ if (ret)
+ goto fail_put_node;
+
+ /*
+ * OPPs determine whether or not they are supported based on
+ * two metrics:
+ * 0 - SoC Revision
+ * 1 - eFuse value
+ */
+ ret = ti_cpufreq_get_rev(opp_data, &version[0]);
+ if (ret)
+ goto fail_put_node;
+
+ ret = ti_cpufreq_get_efuse(opp_data, &version[1]);
+ if (ret)
+ goto fail_put_node;
+
+ if (opp_data->soc_data->multi_regulator) {
+ if (opp_data->soc_data->reg_names)
+ config.regulator_names = opp_data->soc_data->reg_names;
+ else
+ config.regulator_names = default_reg_names;
+ }
+
+ ret = dev_pm_opp_set_config(opp_data->cpu_dev, &config);
+ if (ret < 0) {
+ dev_err(opp_data->cpu_dev, "Failed to set OPP config\n");
+ goto fail_put_node;
+ }
+
+ of_node_put(opp_data->opp_node);
+
+register_cpufreq_dt:
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return 0;
+
+fail_put_node:
+ of_node_put(opp_data->opp_node);
+
+ return ret;
+}
+
+static int __init ti_cpufreq_init(void)
+{
+ const struct of_device_id *match;
+
+ /* Check to ensure we are on a compatible platform */
+ match = ti_cpufreq_match_node();
+ if (match)
+ platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+ sizeof(*match));
+
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+builtin_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 000000000..d295f405c
--- /dev/null
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
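+
+/*
+ * Example: with the bL switcher enabled, an actual 800000 kHz A7 rate is
+ * exposed as VIRT_FREQ(A7_CLUSTER, 800000) = 400000 kHz in the merged table,
+ * and a virtual request is converted back with ACTUAL_FREQ() before being
+ * applied to the cluster clock.
+ */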
+
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if (cluster == per_cpu(physical_cluster, j) &&
+ max_freq < cpu_freq)
+ max_freq = cpu_freq;
+ }
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled())
+ return per_cpu(cpu_last_req_freq, cpu);
+ else
+ return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (!ret) {
+ /*
+ * FIXME: clk_set_rate hasn't returned an error here however it
+ * may be that clk_change_rate failed due to hardware or
+ * firmware issues and wasn't able to report that due to the
+ * current design of the clk core layer. To work around this
+ * problem we will read back the clock rate and check it is
+ * correct. This needs to be removed once clk core is fixed.
+ */
+ if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+ ret = -EIO;
+ }
+
+ if (WARN_ON(ret)) {
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate &&
+ clk_set_rate(clk[old_cluster], new_rate * 1000)) {
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
+
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+ new_cluster = A7_CLUSTER;
+ else if (actual_cluster == A7_CLUSTER &&
+ freqs_new > clk_little_max)
+ new_cluster = A15_CLUSTER;
+ }
+
+ return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+ freqs_new);
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 min_freq = ~0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 max_freq = 0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
+ return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+ unsigned int freq)
+{
+ int count;
+
+ for (count = 0; count < size; count++) {
+ if (table[count].frequency == freq)
+ return true;
+ }
+
+ return false;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ if (i == A15_CLUSTER &&
+ search_frequency(table, count, freq_table[i][j].frequency))
+ continue; /* skip duplicates */
+ table[k++].frequency =
+ VIRT_FREQ(i, freq_table[i][j].frequency);
+ }
+ }
+
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+}
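+
+/*
+ * Example with hypothetical tables: A7 rates {350000, 450000} kHz become
+ * {175000, 225000} kHz after VIRT_FREQ(), and A15 rates {500000, 600000} kHz
+ * are appended unchanged, giving an ascending merged table of
+ * {175000, 225000, 500000, 600000} kHz.
+ */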
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+ int ret;
+
+ if (freq_table[cluster])
+ return 0;
+
+	/*
+	 * Platform-specific SPC code must initialise the OPP table,
+	 * so just check that the OPP count is non-zero.
+	 */
+ ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ if (ret)
+ goto out;
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret)
+ goto out;
+
+ clk[cluster] = clk_get(cpu_dev, NULL);
+ if (!IS_ERR(clk[cluster]))
+ return 0;
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
+}
+
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+ clk_little_max = VIRT_FREQ(A7_CLUSTER,
+ get_table_max(freq_table[A7_CLUSTER]));
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
+ dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ policy->freq_table = freq_table[cur_cluster];
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) =
+ clk_get_cpu_rate(policy->cpu);
+
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+ return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+ return 0;
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+ .name = "vexpress-spc",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ve_spc_cpufreq_set_target,
+ .get = ve_spc_cpufreq_get_rate,
+ .init = ve_spc_cpufreq_init,
+ .exit = ve_spc_cpufreq_exit,
+ .register_em = cpufreq_register_em_with_opp,
+ .attr = cpufreq_generic_attr,
+};
+
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret, i;
+
+ set_switching_enabled(bL_switcher_get_enabled());
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ if (!is_bL_switching_enabled())
+ ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
+
+ ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ve_spc_cpufreq_driver.name, ret);
+ } else {
+ ret = __bLs_register_notifier();
+ if (ret)
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ else
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ve_spc_cpufreq_driver.name);
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_switcher_get_enabled();
+ __bLs_unregister_notifier();
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ ve_spc_cpufreq_driver.name);
+ return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+ .driver = {
+ .name = "vexpress-spc-cpufreq",
+ },
+ .probe = ve_spc_cpufreq_probe,
+ .remove = ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_ALIAS("platform:vexpress-spc-cpufreq");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");