Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 38
-rw-r--r--  arch/powerpc/Makefile | 36
-rw-r--r--  arch/powerpc/boot/dts/akebono.dts | 6
-rw-r--r--  arch/powerpc/boot/simple_alloc.c | 7
-rw-r--r--  arch/powerpc/configs/40x.config | 2
-rw-r--r--  arch/powerpc/configs/44x.config | 2
-rw-r--r--  arch/powerpc/configs/85xx-32bit.config | 1
-rw-r--r--  arch/powerpc/configs/8xx.config | 2
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 1
-rw-r--r--  arch/powerpc/crypto/.gitignore | 2
-rw-r--r--  arch/powerpc/crypto/Kconfig | 20
-rw-r--r--  arch/powerpc/crypto/Makefile | 20
-rw-r--r--  arch/powerpc/crypto/aes.c | 134
-rw-r--r--  arch/powerpc/crypto/aes_cbc.c | 133
-rw-r--r--  arch/powerpc/crypto/aes_ctr.c | 149
-rw-r--r--  arch/powerpc/crypto/aes_xts.c | 162
-rw-r--r--  arch/powerpc/crypto/aesp8-ppc.h | 30
-rw-r--r--  arch/powerpc/crypto/aesp8-ppc.pl | 3889
-rw-r--r--  arch/powerpc/crypto/ghash.c | 185
-rw-r--r--  arch/powerpc/crypto/ghashp8-ppc.pl | 243
-rw-r--r--  arch/powerpc/crypto/vmx.c | 77
-rw-r--r--  arch/powerpc/include/asm/backlight.h | 5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 36
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 14
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 10
-rw-r--r--  arch/powerpc/include/asm/ibmebus.h | 2
-rw-r--r--  arch/powerpc/include/asm/interrupt.h | 2
-rw-r--r--  arch/powerpc/include/asm/io.h | 24
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 98
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 1
-rw-r--r--  arch/powerpc/include/asm/macio.h | 4
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 10
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 2
-rw-r--r--  arch/powerpc/include/asm/paca.h | 4
-rw-r--r--  arch/powerpc/include/asm/page.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 30
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 4
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 26
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg_fsl_emb.h | 32
-rw-r--r--  arch/powerpc/include/asm/set_memory.h | 7
-rw-r--r--  arch/powerpc/include/asm/smp.h | 1
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 4
-rw-r--r--  arch/powerpc/include/asm/tlb.h | 2
-rw-r--r--  arch/powerpc/include/asm/trace.h | 3
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 16
-rw-r--r--  arch/powerpc/include/asm/vdso/gettimeofday.h | 3
-rw-r--r--  arch/powerpc/include/asm/vio.h | 2
-rw-r--r--  arch/powerpc/include/asm/word-at-a-time.h | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 45
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/powerpc/kernel/cpu_specs_book3s_64.h | 34
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 10
-rw-r--r--  arch/powerpc/kernel/iommu.c | 7
-rw-r--r--  arch/powerpc/kernel/irq.c | 5
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c | 4
-rw-r--r--  arch/powerpc/kernel/kprobes-ftrace.c | 3
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 10
-rw-r--r--  arch/powerpc/kernel/prom.c | 28
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 10
-rw-r--r--  arch/powerpc/kernel/secure_boot.c | 8
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 62
-rw-r--r--  arch/powerpc/kernel/smp.c | 10
-rw-r--r--  arch/powerpc/kernel/syscall.c | 2
-rw-r--r--  arch/powerpc/kernel/traps.c | 4
-rw-r--r--  arch/powerpc/kexec/Makefile | 4
-rw-r--r--  arch/powerpc/kexec/core.c | 32
-rw-r--r--  arch/powerpc/kexec/elf_64.c | 4
-rw-r--r--  arch/powerpc/kexec/file_load_64.c | 269
-rw-r--r--  arch/powerpc/kexec/ranges.c | 8
-rw-r--r--  arch/powerpc/kexec/relocate_32.S | 2
-rw-r--r--  arch/powerpc/kexec/vmcore_info.c | 32
-rw-r--r--  arch/powerpc/kvm/Kconfig | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 12
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nestedv2.c | 4
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 3
-rw-r--r--  arch/powerpc/lib/copypage_power7.S | 12
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S | 12
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S | 10
-rw-r--r--  arch/powerpc/lib/sstep.c | 23
-rw-r--r--  arch/powerpc/mm/book3s32/mmu.c | 7
-rw-r--r--  arch/powerpc/mm/book3s64/hash_hugepage.c | 15
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 3
-rw-r--r--  arch/powerpc/mm/book3s64/pgtable.c | 3
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 30
-rw-r--r--  arch/powerpc/mm/drmem.c | 10
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 6
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 16
-rw-r--r--  arch/powerpc/mm/nohash/8xx.c | 33
-rw-r--r--  arch/powerpc/mm/nohash/e500.c | 10
-rw-r--r--  arch/powerpc/mm/nohash/kaslr_booke.c | 4
-rw-r--r--  arch/powerpc/mm/numa.c | 6
-rw-r--r--  arch/powerpc/mm/pageattr.c | 28
-rw-r--r--  arch/powerpc/mm/pgtable.c | 11
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 51
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 11
-rw-r--r--  arch/powerpc/mm/ptdump/ptdump.c | 21
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 149
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 42
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 4
-rw-r--r--  arch/powerpc/perf/internal.h | 1
-rw-r--r--  arch/powerpc/perf/power10-pmu.c | 27
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 9
-rw-r--r--  arch/powerpc/platforms/512x/mpc512x_generic.c | 4
-rw-r--r--  arch/powerpc/platforms/52xx/efika.c | 4
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200.c | 10
-rw-r--r--  arch/powerpc/platforms/52xx/mpc5200_simple.c | 10
-rw-r--r--  arch/powerpc/platforms/83xx/mpc830x_rdb.c | 10
-rw-r--r--  arch/powerpc/platforms/83xx/mpc831x_rdb.c | 10
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_rdb.c | 10
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/bsc913x_qds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/bsc913x_rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 9
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 10
-rw-r--r--  arch/powerpc/platforms/amigaone/setup.c | 8
-rw-r--r--  arch/powerpc/platforms/pasemi/gpio_mdio.c | 6
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/backlight.c | 26
-rw-r--r--  arch/powerpc/platforms/powermac/feature.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-core.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c | 5
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/hvcall.S | 298
-rw-r--r--  arch/powerpc/platforms/pseries/ibmebus.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/lparcfg.c | 16
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/papr_scm.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/vio.c | 61
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 6
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 2
-rw-r--r--  arch/powerpc/sysdev/pmi.c | 6
-rw-r--r--  arch/powerpc/xmon/xmon.c | 12
144 files changed, 6282 insertions(+), 979 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b9fc064d38..1c4be33736 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -154,7 +154,7 @@ config PPC
select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
- select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_UBSAN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if PPC_RADIX_MMU
@@ -212,7 +212,7 @@ config PPC
select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
- select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KASAN if PPC32 && PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN if PPC_RADIX_MMU
select HAVE_ARCH_KASAN if PPC_BOOK3E_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
@@ -333,7 +333,6 @@ config PANIC_TIMEOUT
config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
- depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -689,9 +688,7 @@ config ARCH_SELECTS_CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
- depends on PPC64 && (PPC_RTAS || PPC_POWERNV)
- select CRASH_CORE
- select CRASH_DUMP
+ depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,
@@ -809,19 +806,23 @@ choice
config PPC_4K_PAGES
bool "4k page size"
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
+ select HAVE_PAGE_SIZE_4KB
config PPC_16K_PAGES
bool "16k page size"
depends on 44x || PPC_8xx
+ select HAVE_PAGE_SIZE_16KB
config PPC_64K_PAGES
bool "64k page size"
depends on 44x || PPC_BOOK3S_64
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
+ select HAVE_PAGE_SIZE_64KB
config PPC_256K_PAGES
bool "256k page size (Requires non-standard binutils settings)"
depends on 44x && !PPC_47x
+ select HAVE_PAGE_SIZE_256KB
help
Make the page size 256k.
@@ -832,29 +833,6 @@ config PPC_256K_PAGES
endchoice
-config PAGE_SIZE_4KB
- def_bool y
- depends on PPC_4K_PAGES
-
-config PAGE_SIZE_16KB
- def_bool y
- depends on PPC_16K_PAGES
-
-config PAGE_SIZE_64KB
- def_bool y
- depends on PPC_64K_PAGES
-
-config PAGE_SIZE_256KB
- def_bool y
- depends on PPC_256K_PAGES
-
-config PPC_PAGE_SHIFT
- int
- default 18 if PPC_256K_PAGES
- default 16 if PPC_64K_PAGES
- default 14 if PPC_16K_PAGES
- default 12
-
config THREAD_SHIFT
int "Thread shift" if EXPERT
range 13 15
@@ -891,7 +869,7 @@ config DATA_SHIFT
default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 24 if STRICT_KERNEL_RWX && PPC_85xx
- default PPC_PAGE_SHIFT
+ default PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
Smaller is the alignment, greater is the number of necessary DBATs.
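With PPC_PAGE_SHIFT gone, the page size flows through the generic
HAVE_PAGE_SIZE_* / CONFIG_PAGE_SHIFT machinery, so PAGE_SHIFT can be used
directly as the DATA_SHIFT default above. A minimal userspace sketch of the
shift/size relationship these options encode (the value 16 assumes
PPC_64K_PAGES; this is an illustration, not the kernel's asm/page.h):

#include <stdio.h>

/* Illustration only: CONFIG_PAGE_SHIFT is 12/14/16/18 for the
 * 4K/16K/64K/256K choices in the Kconfig hunk above. */
#define PAGE_SHIFT 16                      /* assume PPC_64K_PAGES */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	printf("page size: %lu bytes\n", PAGE_SIZE);   /* 65536 */
	printf("page mask: 0x%lx\n", PAGE_MASK);
	return 0;
}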
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 051247027d..65261cbe5b 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -144,11 +144,11 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mlong-double-128)
# Clang unconditionally reserves r2 on ppc32 and does not support the flag
-# https://bugs.llvm.org/show_bug.cgi?id=39555
+# https://llvm.org/pr39555
CFLAGS-$(CONFIG_PPC32) := $(call cc-option, -ffixed-r2)
# Clang doesn't support -mmultiple / -mno-multiple
-# https://bugs.llvm.org/show_bug.cgi?id=39556
+# https://llvm.org/pr39556
CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD))
CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
@@ -219,18 +219,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
-cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-
-# When using '-many -mpower4' gas will first try and find a matching power4
-# mnemonic and failing that it will allow any valid mnemonic that GAS knows
-# about. GCC will pass -many to GAS when assembling, clang does not.
-# LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
-# but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
-cpu-as-$(CONFIG_PPC_BOOK3S_64) += $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
-
-KBUILD_AFLAGS += $(cpu-as-y)
-KBUILD_CFLAGS += $(cpu-as-y)
-
KBUILD_AFLAGS += $(aflags-y)
KBUILD_CFLAGS += $(cflags-y)
@@ -314,6 +302,26 @@ ppc32_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/book3s_32.config \
-f $(srctree)/Makefile allmodconfig
+generated_configs += ppc40x_allmodconfig
+ppc40x_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/40x.config \
+ -f $(srctree)/Makefile allmodconfig
+
+generated_configs += ppc44x_allmodconfig
+ppc44x_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/44x.config \
+ -f $(srctree)/Makefile allmodconfig
+
+generated_configs += ppc8xx_allmodconfig
+ppc8xx_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/8xx.config \
+ -f $(srctree)/Makefile allmodconfig
+
+generated_configs += ppc85xx_allmodconfig
+ppc85xx_allmodconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-32bit.config \
+ -f $(srctree)/Makefile allmodconfig
+
generated_configs += ppc_defconfig
ppc_defconfig:
$(call merge_into_defconfig,book3s_32.config,)
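Each new wrapper seeds Kconfig with the matching fragment via
KCONFIG_ALLCONFIG before running allmodconfig, so for instance
"make ppc44x_allmodconfig" yields an allmodconfig pinned to
CONFIG_PPC64=n and CONFIG_44x=y from the 44x.config fragment added below.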
diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts
index df18f8dc46..343326c303 100644
--- a/arch/powerpc/boot/dts/akebono.dts
+++ b/arch/powerpc/boot/dts/akebono.dts
@@ -126,7 +126,7 @@
interrupts = <93 2>;
};
- EHCI0: ehci@30010000000 {
+ EHCI0: usb@30010000000 {
compatible = "ibm,476gtr-ehci", "generic-ehci";
reg = <0x300 0x10000000 0x0 0x10000>;
interrupt-parent = <&MPIC>;
@@ -140,14 +140,14 @@
interrupt-parent = <&MPIC>;
};
- OHCI0: ohci@30010010000 {
+ OHCI0: usb@30010010000 {
compatible = "ibm,476gtr-ohci", "generic-ohci";
reg = <0x300 0x10010000 0x0 0x10000>;
interrupt-parent = <&MPIC>;
interrupts = <89 1>;
};
- OHCI1: ohci@30010020000 {
+ OHCI1: usb@30010020000 {
compatible = "ibm,476gtr-ohci", "generic-ohci";
reg = <0x300 0x10020000 0x0 0x10000>;
interrupt-parent = <&MPIC>;
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
index 267d6524ca..d07796fdf9 100644
--- a/arch/powerpc/boot/simple_alloc.c
+++ b/arch/powerpc/boot/simple_alloc.c
@@ -112,8 +112,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
return ptr;
new = simple_malloc(size);
- memcpy(new, ptr, p->size);
- simple_free(ptr);
+ if (new) {
+ memcpy(new, ptr, p->size);
+ simple_free(ptr);
+ }
+
return new;
}
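The hunk above makes simple_realloc() follow realloc(3) semantics on
allocation failure: if the new allocation fails, the old buffer must survive
so the caller still owns valid data. A standalone sketch of the same pattern
(grow() is a hypothetical stand-in for the bootwrapper's
simple_malloc()/simple_free() pair):

#include <stdlib.h>
#include <string.h>

/* Grow a buffer without losing it on failure: only copy and free the
 * old allocation once the new one is known to exist. */
static void *grow(void *old, size_t old_size, size_t new_size)
{
	void *new = malloc(new_size);

	if (new) {
		memcpy(new, old, old_size);
		free(old);
	}
	return new;	/* NULL: allocation failed, caller still owns old */
}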
diff --git a/arch/powerpc/configs/40x.config b/arch/powerpc/configs/40x.config
new file mode 100644
index 0000000000..82a9d58ddb
--- /dev/null
+++ b/arch/powerpc/configs/40x.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=n
+CONFIG_40x=y
diff --git a/arch/powerpc/configs/44x.config b/arch/powerpc/configs/44x.config
new file mode 100644
index 0000000000..79b7b19629
--- /dev/null
+++ b/arch/powerpc/configs/44x.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=n
+CONFIG_44x=y
diff --git a/arch/powerpc/configs/85xx-32bit.config b/arch/powerpc/configs/85xx-32bit.config
index 6b8894d727..a85310bcb1 100644
--- a/arch/powerpc/configs/85xx-32bit.config
+++ b/arch/powerpc/configs/85xx-32bit.config
@@ -1,3 +1,4 @@
+CONFIG_PPC64=n
CONFIG_HIGHMEM=y
CONFIG_KEXEC=y
CONFIG_PPC_85xx=y
diff --git a/arch/powerpc/configs/8xx.config b/arch/powerpc/configs/8xx.config
new file mode 100644
index 0000000000..7eb3ffbbd6
--- /dev/null
+++ b/arch/powerpc/configs/8xx.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=n
+CONFIG_PPC_8xx=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index aa8bb0208b..2b175ddf82 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -24,7 +24,6 @@ CONFIG_PS3_VRAM=m
CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_KEXEC=y
-# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set
CONFIG_PPC_4K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PM=y
diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
index e1094f08f7..e9fe73aac8 100644
--- a/arch/powerpc/crypto/.gitignore
+++ b/arch/powerpc/crypto/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
+aesp8-ppc.S
ghashp10-ppc.S
+ghashp8-ppc.S
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 6fc2248ca5..1e201b7ae2 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -137,4 +137,24 @@ config CRYPTO_POLY1305_P10
- Power10 or later
- Little-endian
+config CRYPTO_DEV_VMX
+ bool "Support for VMX cryptographic acceleration instructions"
+ depends on PPC64 && VSX
+ help
+ Support for VMX cryptographic acceleration instructions.
+
+config CRYPTO_DEV_VMX_ENCRYPT
+ tristate "Encryption acceleration support on P8 CPU"
+ depends on CRYPTO_DEV_VMX
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTR
+ select CRYPTO_GHASH
+ select CRYPTO_XTS
+ default m
+ help
+ Support for VMX cryptographic acceleration instructions on Power8 CPU.
+ This module supports acceleration for AES and GHASH in hardware. If you
+ choose 'M' here, this module will be called vmx-crypto.
+
endmenu
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index ebdac1b9eb..fca0e97398 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@@ -27,14 +28,29 @@ crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
+vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+override flavour := linux-ppc64le
+else
+ifdef CONFIG_PPC64_ELF_ABI_V2
+override flavour := linux-ppc64-elfv2
+else
+override flavour := linux-ppc64
+endif
+endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
+ cmd_perl = $(PERL) $< $(flavour) > $@
-targets += aesp10-ppc.S ghashp10-ppc.S
+targets += aesp10-ppc.S ghashp10-ppc.S aesp8-ppc.S ghashp8-ppc.S
$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(call if_changed,perl)
+$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+ $(call if_changed,perl)
+
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
+OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
diff --git a/arch/powerpc/crypto/aes.c b/arch/powerpc/crypto/aes.c
new file mode 100644
index 0000000000..ec06189fbf
--- /dev/null
+++ b/arch/powerpc/crypto/aes.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/simd.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctx {
+ struct crypto_cipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+};
+
+static int p8_aes_init(struct crypto_tfm *tfm)
+{
+ const char *alg = crypto_tfm_alg_name(tfm);
+ struct crypto_cipher *fallback;
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR
+ "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_cipher_set_flags(fallback,
+ crypto_cipher_get_flags((struct
+ crypto_cipher *)
+ tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_cipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
+}
+
+static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!crypto_simd_usable()) {
+ crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+ } else {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_encrypt(src, dst, &ctx->enc_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ }
+}
+
+static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!crypto_simd_usable()) {
+ crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+ } else {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_decrypt(src, dst, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ }
+}
+
+struct crypto_alg p8_aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "p8_aes",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 1000,
+ .cra_type = NULL,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_aes_ctx),
+ .cra_init = p8_aes_init,
+ .cra_exit = p8_aes_exit,
+ .cra_cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = p8_aes_setkey,
+ .cia_encrypt = p8_aes_encrypt,
+ .cia_decrypt = p8_aes_decrypt,
+ },
+};
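All of the p8_aes_* glue in this series brackets its calls into the generated
assembly with the same six-call guard: VSX shares register state with the
task's FP/vector context, so the code must pin the task to the CPU (no
preemption), forbid page faults while vector state is live, and explicitly
borrow the unit. A sketch of that guard factored into helpers for readability
(p8_vsx_begin/p8_vsx_end are hypothetical names; the sources above open-code
the calls):

#include <linux/preempt.h>
#include <linux/uaccess.h>	/* pagefault_disable() */
#include <asm/switch_to.h>	/* enable_kernel_vsx() */

/* Enter a region where the kernel may execute VSX instructions. */
static inline void p8_vsx_begin(void)
{
	preempt_disable();	/* keep the borrowed unit on this CPU */
	pagefault_disable();	/* no faults while vector state is live */
	enable_kernel_vsx();	/* give up user FP/VMX/VSX state to the kernel */
}

/* Leave the VSX region, restoring normal task semantics. */
static inline void p8_vsx_end(void)
{
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();
}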
diff --git a/arch/powerpc/crypto/aes_cbc.c b/arch/powerpc/crypto/aes_cbc.c
new file mode 100644
index 0000000000..ed0debc7ac
--- /dev/null
+++ b/arch/powerpc/crypto/aes_cbc.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES CBC routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_cbc_ctx {
+ struct crypto_skcipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+};
+
+static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher("cbc(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fallback)) {
+ pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
+ ctx->fallback = fallback;
+ return 0;
+}
+
+static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+}
+
+static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int ret;
+
+ if (!crypto_simd_usable()) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_cbc_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ enc ? &ctx->enc_key : &ctx->dec_key,
+ walk.iv, enc);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
+ return ret;
+}
+
+static int p8_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return p8_aes_cbc_crypt(req, 1);
+}
+
+static int p8_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return p8_aes_cbc_crypt(req, 0);
+}
+
+struct skcipher_alg p8_aes_cbc_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "p8_aes_cbc",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+ .setkey = p8_aes_cbc_setkey,
+ .encrypt = p8_aes_cbc_encrypt,
+ .decrypt = p8_aes_cbc_decrypt,
+ .init = p8_aes_cbc_init,
+ .exit = p8_aes_cbc_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+};
diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c
new file mode 100644
index 0000000000..9a3da8cd62
--- /dev/null
+++ b/arch/powerpc/crypto/aes_ctr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES CTR routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_ctr_ctx {
+ struct crypto_skcipher *fallback;
+ struct aes_key enc_key;
+};
+
+static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fallback)) {
+ pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
+ ctx->fallback = fallback;
+ return 0;
+}
+
+static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+}
+
+static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
+}
+
+static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
+ struct skcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u8 keystream[AES_BLOCK_SIZE];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ crypto_xor_cpy(dst, keystream, src, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int p8_aes_ctr_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int ret;
+
+ if (!crypto_simd_usable()) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return crypto_skcipher_encrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ nbytes / AES_BLOCK_SIZE,
+ &ctx->enc_key, walk.iv);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ do {
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
+
+ ret = skcipher_walk_done(&walk, nbytes);
+ }
+ if (nbytes) {
+ p8_aes_ctr_final(ctx, &walk);
+ ret = skcipher_walk_done(&walk, 0);
+ }
+ return ret;
+}
+
+struct skcipher_alg p8_aes_ctr_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "p8_aes_ctr",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+ .setkey = p8_aes_ctr_setkey,
+ .encrypt = p8_aes_ctr_crypt,
+ .decrypt = p8_aes_ctr_crypt,
+ .init = p8_aes_ctr_init,
+ .exit = p8_aes_ctr_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+};
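p8_aes_ctr_crypt() advances walk.iv once per handled block with crypto_inc(),
which treats the 16-byte counter block as a single big-endian integer.
Roughly what that increment does (a sketch of the semantics, not the kernel's
implementation):

/* Increment a big-endian 128-bit counter block by one. */
static void ctr128_inc(unsigned char ctr[16])
{
	int i;

	for (i = 15; i >= 0; i--)
		if (++ctr[i])	/* stop once a byte does not wrap to zero */
			break;
}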
diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c
new file mode 100644
index 0000000000..dabbccb415
--- /dev/null
+++ b/arch/powerpc/crypto/aes_xts.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AES XTS routines supporting VMX In-core instructions on Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+ */
+
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/xts.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_xts_ctx {
+ struct crypto_skcipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+ struct aes_key tweak_key;
+};
+
+static int p8_aes_xts_init(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fallback)) {
+ pr_err("Failed to allocate xts(aes) fallback: %ld\n",
+ PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(fallback));
+ ctx->fallback = fallback;
+ return 0;
+}
+
+static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
+{
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+}
+
+static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ u8 tweak[AES_BLOCK_SIZE];
+ int ret;
+
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ return enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+
+ aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
+
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ while ((nbytes = walk.nbytes) != 0) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr,
+ walk.dst.virt.addr,
+ round_down(nbytes, AES_BLOCK_SIZE),
+ &ctx->dec_key, NULL, tweak);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
+ return ret;
+}
+
+static int p8_aes_xts_encrypt(struct skcipher_request *req)
+{
+ return p8_aes_xts_crypt(req, 1);
+}
+
+static int p8_aes_xts_decrypt(struct skcipher_request *req)
+{
+ return p8_aes_xts_crypt(req, 0);
+}
+
+struct skcipher_alg p8_aes_xts_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "p8_aes_xts",
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 2000,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+ .setkey = p8_aes_xts_setkey,
+ .encrypt = p8_aes_xts_encrypt,
+ .decrypt = p8_aes_xts_decrypt,
+ .init = p8_aes_xts_init,
+ .exit = p8_aes_xts_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+};
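p8_aes_xts_setkey() relies on the standard XTS key layout: the supplied key
is two equal-length AES keys back to back, the first for the data, the second
for encrypting the tweak (xts_verify_key() checks the overall length is
valid). A sketch of that split (xts_split() is a hypothetical helper; the
setkey above simply indexes key and key + keylen/2 directly):

/* Split a 2*N-byte XTS key into its data and tweak halves. */
static void xts_split(const unsigned char *key, unsigned int keylen,
		      const unsigned char **data_key,
		      const unsigned char **tweak_key)
{
	*data_key  = key;		/* first keylen/2 bytes */
	*tweak_key = key + keylen / 2;	/* second half drives the tweak */
}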
diff --git a/arch/powerpc/crypto/aesp8-ppc.h b/arch/powerpc/crypto/aesp8-ppc.h
new file mode 100644
index 0000000000..5764d44383
--- /dev/null
+++ b/arch/powerpc/crypto/aesp8-ppc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/types.h>
+#include <crypto/aes.h>
+
+struct aes_key {
+ u8 key[AES_MAX_KEYLENGTH];
+ int rounds;
+};
+
+extern struct shash_alg p8_ghash_alg;
+extern struct crypto_alg p8_aes_alg;
+extern struct skcipher_alg p8_aes_cbc_alg;
+extern struct skcipher_alg p8_aes_ctr_alg;
+extern struct skcipher_alg p8_aes_xts_alg;
+
+int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+ struct aes_key *key);
+int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
+ struct aes_key *key);
+void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
+void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key, u8 *iv, const int enc);
+void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
+ size_t len, const struct aes_key *key,
+ const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
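The header above is the C-side contract for the perlasm entry points. A
minimal sketch of one-block encryption through that interface, under the same
VSX guard the glue code uses (sketch_encrypt_one() is hypothetical; u8 and
AES_BLOCK_SIZE come from the header's own includes, and both key expansion
and encryption must run inside the VSX window):

static int sketch_encrypt_one(const u8 *key, unsigned int keybits,
			      const u8 in[AES_BLOCK_SIZE],
			      u8 out[AES_BLOCK_SIZE])
{
	struct aes_key ek;
	int ret;

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keybits, &ek);	/* 0 on success */
	if (!ret)
		aes_p8_encrypt(in, out, &ek);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	return ret ? -EINVAL : 0;
}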
diff --git a/arch/powerpc/crypto/aesp8-ppc.pl b/arch/powerpc/crypto/aesp8-ppc.pl
new file mode 100644
index 0000000000..f729589d79
--- /dev/null
+++ b/arch/powerpc/crypto/aesp8-ppc.pl
@@ -0,0 +1,3889 @@
+#! /usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from CRYPTOGAMs[1] and is included here using the option
+# in the license to distribute the code under the GPL. Therefore this program
+# is free software; you can redistribute it and/or modify it under the terms of
+# the GNU General Public License version 2 as published by the Free Software
+# Foundation.
+#
+# [1] https://www.openssl.org/~appro/cryptogams/
+
+# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain copyright notices,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# * Neither the name of the CRYPTOGAMS nor the names of its
+# copyright holder and contributors may be used to endorse or
+# promote products derived from this software without specific
+# prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by POWER8 processor.
+# The module is endian-agnostic in sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies MSR.VSX flag being
+# set. It should also be noted that ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in a pure AltiVec/VMX way [data was
+# aligned programmatically, which in turn guarantees exception-free
+# execution], but that turned out to hamper performance when vcipher
+# instructions are interleaved. It's reckoned that the eventual
+# misalignment penalties at page boundaries are on average lower than
+# the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Added an XTS subroutine; a 9x improvement on little-endian and a 12x
+# improvement on big-endian systems were measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+# CBC en-/decrypt CTR XTS
+# POWER8[le] 3.96/0.72 0.74 1.1
+# POWER8[be] 3.75/0.65 0.66 1.0
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T =8;
+ $LRSAVE =2*$SIZE_T;
+ $STU ="stdu";
+ $POP ="ld";
+ $PUSH ="std";
+ $UCMP ="cmpld";
+ $SHL ="sldi";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T =4;
+ $LRSAVE =$SIZE_T;
+ $STU ="stwu";
+ $POP ="lwz";
+ $PUSH ="stw";
+ $UCMP ="cmplw";
+ $SHL ="slwi";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_p8";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{ # Key setup procedures #
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine "any"
+
+.text
+
+.align 7
+rcon:
+.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 ?rev
+.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
+.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
+.long 0,0,0,0 ?asis
+.long 0x0f102132, 0x43546576, 0x8798a9ba, 0xcbdcedfe
+Lconsts:
+ mflr r0
+ bcl 20,31,\$+4
+ mflr $ptr #vvvvv "distance between . and rcon
+ addi $ptr,$ptr,-0x58
+ mtlr r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+.asciz "AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl .${prefix}_set_encrypt_key
+Lset_encrypt_key:
+ mflr r11
+ $PUSH r11,$LRSAVE($sp)
+
+ li $ptr,-1
+ ${UCMP}i $inp,0
+ beq- Lenc_key_abort # if ($inp==0) return -1;
+ ${UCMP}i $out,0
+ beq- Lenc_key_abort # if ($out==0) return -1;
+ li $ptr,-2
+ cmpwi $bits,128
+ blt- Lenc_key_abort
+ cmpwi $bits,256
+ bgt- Lenc_key_abort
+ andi. r0,$bits,0x3f
+ bne- Lenc_key_abort
+
+ lis r0,0xfff0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ bl Lconsts
+ mtlr r11
+
+ neg r9,$inp
+ lvx $in0,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ lvsr $key,0,r9 # borrow $key
+ li r8,0x20
+ cmpwi $bits,192
+ lvx $in1,0,$inp
+ le?vspltisb $mask,0x0f # borrow $mask
+ lvx $rcon,0,$ptr
+ le?vxor $key,$key,$mask # adjust for byte swap
+ lvx $mask,r8,$ptr
+ addi $ptr,$ptr,0x10
+ vperm $in0,$in0,$in1,$key # align [and byte swap in LE]
+ li $cnt,8
+ vxor $zero,$zero,$zero
+ mtctr $cnt
+
+ ?lvsr $outperm,0,$out
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$zero,$outmask,$outperm
+
+ blt Loop128
+ addi $inp,$inp,8
+ beq L192
+ addi $inp,$inp,8
+ b L256
+
+.align 4
+Loop128:
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ bdnz Loop128
+
+ lvx $rcon,0,$ptr # last two round keys
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+
+ vperm $key,$in0,$in0,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,0x50
+
+ li $rounds,10
+ b Ldone
+
+.align 4
+L192:
+ lvx $tmp,0,$inp
+ li $cnt,4
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ vspltisb $key,8 # borrow $key
+ mtctr $cnt
+ vsububm $mask,$mask,$key # adjust the mask
+
+Loop192:
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vcipherlast $key,$key,$rcon
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+
+ vsldoi $stage,$zero,$in1,8
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vsldoi $stage,$stage,$in0,8
+
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vsldoi $stage,$in0,$in1,8
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vperm $outtail,$stage,$stage,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vspltw $tmp,$in0,3
+ vxor $tmp,$tmp,$in1
+ vsldoi $in1,$zero,$in1,12 # >>32
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in1,$in1,$tmp
+ vxor $in0,$in0,$key
+ vxor $in1,$in1,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdnz Loop192
+
+ li $rounds,12
+ addi $out,$out,0x20
+ b Ldone
+
+.align 4
+L256:
+ lvx $tmp,0,$inp
+ li $cnt,7
+ li $rounds,14
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $out,$out,16
+ vperm $in1,$in1,$tmp,$key # align [and byte swap in LE]
+ mtctr $cnt
+
+Loop256:
+ vperm $key,$in1,$in1,$mask # rotate-n-splat
+ vsldoi $tmp,$zero,$in0,12 # >>32
+ vperm $outtail,$in1,$in1,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ vcipherlast $key,$key,$rcon
+ stvx $stage,0,$out
+ addi $out,$out,16
+
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in0,$in0,$tmp
+ vadduwm $rcon,$rcon,$rcon
+ vxor $in0,$in0,$key
+ vperm $outtail,$in0,$in0,$outperm # rotate
+ vsel $stage,$outhead,$outtail,$outmask
+ vmr $outhead,$outtail
+ stvx $stage,0,$out
+ addi $inp,$out,15 # 15 is not typo
+ addi $out,$out,16
+ bdz Ldone
+
+ vspltw $key,$in0,3 # just splat
+ vsldoi $tmp,$zero,$in1,12 # >>32
+ vsbox $key,$key
+
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+ vsldoi $tmp,$zero,$tmp,12 # >>32
+ vxor $in1,$in1,$tmp
+
+ vxor $in1,$in1,$key
+ b Loop256
+
+.align 4
+Ldone:
+ lvx $in1,0,$inp # redundant in aligned case
+ vsel $in1,$outhead,$in1,$outmask
+ stvx $in1,0,$inp
+ li $ptr,0
+ mtspr 256,$vrsave
+ stw $rounds,0($out)
+
+Lenc_key_abort:
+ mr r3,$ptr
+ blr
+ .long 0
+ .byte 0,12,0x14,1,0,0,3,0
+ .long 0
+.size .${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl .${prefix}_set_decrypt_key
+ $STU $sp,-$FRAME($sp)
+ mflr r10
+ $PUSH r10,$FRAME+$LRSAVE($sp)
+ bl Lset_encrypt_key
+ mtlr r10
+
+ cmpwi r3,0
+ bne- Ldec_key_abort
+
+ slwi $cnt,$rounds,4
+ subi $inp,$out,240 # first round key
+ srwi $rounds,$rounds,1
+ add $out,$inp,$cnt # last round key
+ mtctr $rounds
+
+Ldeckey:
+ lwz r0, 0($inp)
+ lwz r6, 4($inp)
+ lwz r7, 8($inp)
+ lwz r8, 12($inp)
+ addi $inp,$inp,16
+ lwz r9, 0($out)
+ lwz r10,4($out)
+ lwz r11,8($out)
+ lwz r12,12($out)
+ stw r0, 0($out)
+ stw r6, 4($out)
+ stw r7, 8($out)
+ stw r8, 12($out)
+ subi $out,$out,16
+ stw r9, -16($inp)
+ stw r10,-12($inp)
+ stw r11,-8($inp)
+ stw r12,-4($inp)
+ bdnz Ldeckey
+
+ xor r3,r3,r3 # return value
+Ldec_key_abort:
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,1,0x80,0,3,0
+ .long 0
+.size .${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
+#########################################################################
+{{{ # Single block en- and decrypt procedures #
+sub gen_block () {
+my $dir = shift;
+my $n = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl .${prefix}_${dir}crypt
+ lwz $rounds,240($key)
+ lis r0,0xfc00
+ mfspr $vrsave,256
+ li $idx,15 # 15 is not typo
+ mtspr 256,r0
+
+ lvx v0,0,$inp
+ neg r11,$out
+ lvx v1,$idx,$inp
+ lvsl v2,0,$inp # inpperm
+ le?vspltisb v4,0x0f
+ ?lvsl v3,0,r11 # outperm
+ le?vxor v2,v2,v4
+ li $idx,16
+ vperm v0,v0,v1,v2 # align [and byte swap in LE]
+ lvx v1,0,$key
+ ?lvsl v5,0,$key # keyperm
+ srwi $rounds,$rounds,1
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ subi $rounds,$rounds,1
+ ?vperm v1,v1,v2,v5 # align round key
+
+ vxor v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Loop_${dir}c:
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ addi $idx,$idx,16
+ ?vperm v1,v1,v2,v5
+ v${n}cipher v0,v0,v1
+ lvx v1,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_${dir}c
+
+ ?vperm v2,v2,v1,v5
+ v${n}cipher v0,v0,v2
+ lvx v2,$idx,$key
+ ?vperm v1,v1,v2,v5
+ v${n}cipherlast v0,v0,v1
+
+ vspltisb v2,-1
+ vxor v1,v1,v1
+ li $idx,15 # 15 is not typo
+ ?vperm v2,v1,v2,v3 # outmask
+ le?vxor v3,v3,v4
+ lvx v1,0,$out # outhead
+ vperm v0,v0,v0,v3 # rotate [and byte swap in LE]
+ vsel v1,v1,v0,v2
+ lvx v4,$idx,$out
+ stvx v1,0,$out
+ vsel v0,v0,v4,v2
+ stvx v0,$idx,$out
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+#########################################################################
+{{{ # CBC en- and decrypt procedures #
+my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
+ map("v$_",(4..10));
+$code.=<<___;
+.globl .${prefix}_cbc_encrypt
+ ${UCMP}i $len,16
+ bltlr-
+
+ cmpwi $enc,0 # test direction
+ lis r0,0xffe0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ li $idx,15
+ vxor $rndkey0,$rndkey0,$rndkey0
+ le?vspltisb $tmp,0x0f
+
+ lvx $ivec,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $ivec,$ivec,$inptail,$inpperm
+
+ neg r11,$inp
+ ?lvsl $keyperm,0,$key # prepare for unaligned key
+ lwz $rounds,240($key)
+
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inptail,0,$inp
+ addi $inp,$inp,15 # 15 is not typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ?lvsr $outperm,0,$out # prepare for unaligned store
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+
+ srwi $rounds,$rounds,1
+ li $idx,16
+ subi $rounds,$rounds,1
+ beq Lcbc_dec
+
+Lcbc_enc:
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ mtctr $rounds
+ subi $len,$len,16 # len-=16
+
+ lvx $rndkey0,0,$key
+ vperm $inout,$inout,$inptail,$inpperm
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ vxor $inout,$inout,$ivec
+
+Loop_cbc_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_cbc_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $ivec,$inout,$rndkey0
+ ${UCMP}i $len,16
+
+ vperm $tmp,$ivec,$ivec,$outperm
+ vsel $inout,$outhead,$tmp,$outmask
+ vmr $outhead,$tmp
+ stvx $inout,0,$out
+ addi $out,$out,16
+ bge Lcbc_enc
+
+ b Lcbc_done
+
+.align 4
+Lcbc_dec:
+ ${UCMP}i $len,128
+ bge _aesp8_cbc_decrypt8x
+ vmr $tmp,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ mtctr $rounds
+ subi $len,$len,16 # len-=16
+
+ lvx $rndkey0,0,$key
+ vperm $tmp,$tmp,$inptail,$inpperm
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$tmp,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+
+Loop_cbc_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_cbc_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipherlast $inout,$inout,$rndkey0
+ ${UCMP}i $len,16
+
+ vxor $inout,$inout,$ivec
+ vmr $ivec,$tmp
+ vperm $tmp,$inout,$inout,$outperm
+ vsel $inout,$outhead,$tmp,$outmask
+ vmr $outhead,$tmp
+ stvx $inout,0,$out
+ addi $out,$out,16
+ bge Lcbc_dec
+
+Lcbc_done:
+ addi $out,$out,-1
+ lvx $inout,0,$out # redundant in aligned case
+ vsel $inout,$outhead,$inout,$outmask
+ stvx $inout,0,$out
+
+ neg $enc,$ivp # write [unaligned] iv
+ li $idx,15 # 15 is not typo
+ vxor $rndkey0,$rndkey0,$rndkey0
+ vspltisb $outmask,-1
+ le?vspltisb $tmp,0x0f
+ ?lvsl $outperm,0,$enc
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+ lvx $outhead,0,$ivp
+ vperm $ivec,$ivec,$ivec,$outperm
+ vsel $inout,$outhead,$ivec,$outmask
+ lvx $inptail,$idx,$ivp
+ stvx $inout,0,$ivp
+ vsel $inout,$ivec,$inptail,$outmask
+ stvx $inout,$idx,$ivp
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,6,0
+ .long 0
+___
+#########################################################################
+{{ # Optimized CBC decrypt procedure #
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4); # aliases with "caller", redundant assignment
+
+$code.=<<___;
+.align 5
+_aesp8_cbc_decrypt8x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ li r10,`$FRAME+8*16+15`
+ li r11,`$FRAME+8*16+31`
+ stvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+ subi $len,$len,128 # bias
+
+ lvx $rndkey0,$x00,$key # load key schedule
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ lvx v31,$x00,$key
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_cbc_dec_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_cbc_dec_key
+
+ lvx v26,$x10,$key
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key
+ ?vperm v29,v29,v30,$keyperm
+ lvx $out0,$x70,$key # borrow $out0
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$out0,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ #lvx $inptail,0,$inp # "caller" already did this
+ #addi $inp,$inp,15 # 15 is not typo
+ subi $inp,$inp,15 # undo "caller"
+
+ le?li $idx,8
+ lvx_u $in0,$x00,$inp # load first 8 "words"
+ le?lvsl $inpperm,0,$idx
+ le?vspltisb $tmp,0x0f
+ lvx_u $in1,$x10,$inp
+ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
+ lvx_u $in2,$x20,$inp
+ le?vperm $in0,$in0,$in0,$inpperm
+ lvx_u $in3,$x30,$inp
+ le?vperm $in1,$in1,$in1,$inpperm
+ lvx_u $in4,$x40,$inp
+ le?vperm $in2,$in2,$in2,$inpperm
+ vxor $out0,$in0,$rndkey0
+ lvx_u $in5,$x50,$inp
+ le?vperm $in3,$in3,$in3,$inpperm
+ vxor $out1,$in1,$rndkey0
+ lvx_u $in6,$x60,$inp
+ le?vperm $in4,$in4,$in4,$inpperm
+ vxor $out2,$in2,$rndkey0
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+ le?vperm $in5,$in5,$in5,$inpperm
+ vxor $out3,$in3,$rndkey0
+ le?vperm $in6,$in6,$in6,$inpperm
+ vxor $out4,$in4,$rndkey0
+ le?vperm $in7,$in7,$in7,$inpperm
+ vxor $out5,$in5,$rndkey0
+ vxor $out6,$in6,$rndkey0
+ vxor $out7,$in7,$rndkey0
+
+ mtctr $rounds
+ b Loop_cbc_dec8x
+.align 5
+Loop_cbc_dec8x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_cbc_dec8x
+
+ subic $len,$len,128 # $len-=128
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+
+ and r0,r0,$len
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+ vncipher $out6,$out6,v26
+ vncipher $out7,$out7,v26
+
+	add	$inp,$inp,r0		# $inp is adjusted in such a
+ # way that at exit from the
+ # loop inX-in7 are loaded
+ # with last "words"
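+					# (branchless idiom, Python sketch,
+					#  illustrative only -- $len is the
+					#  already-biased remainder here:
+					#    r0 = -1 if borrowed else 0
+					#    inp += r0 & len
+					#  so the final pass backs $inp up
+					#  and re-reads the last complete
+					#  8-block group)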
+ vncipher $out0,$out0,v27
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+ vncipher $out6,$out6,v27
+ vncipher $out7,$out7,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ vncipher $out6,$out6,v28
+ vncipher $out7,$out7,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ vncipher $out6,$out6,v29
+ vncipher $out7,$out7,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+
+ vncipher $out0,$out0,v30
+ vxor $ivec,$ivec,v31 # xor with last round key
+ vncipher $out1,$out1,v30
+ vxor $in0,$in0,v31
+ vncipher $out2,$out2,v30
+ vxor $in1,$in1,v31
+ vncipher $out3,$out3,v30
+ vxor $in2,$in2,v31
+ vncipher $out4,$out4,v30
+ vxor $in3,$in3,v31
+ vncipher $out5,$out5,v30
+ vxor $in4,$in4,v31
+ vncipher $out6,$out6,v30
+ vxor $in5,$in5,v31
+ vncipher $out7,$out7,v30
+ vxor $in6,$in6,v31
+
+ vncipherlast $out0,$out0,$ivec
+ vncipherlast $out1,$out1,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vncipherlast $out2,$out2,$in1
+ lvx_u $in1,$x10,$inp
+ vncipherlast $out3,$out3,$in2
+ le?vperm $in0,$in0,$in0,$inpperm
+ lvx_u $in2,$x20,$inp
+ vncipherlast $out4,$out4,$in3
+ le?vperm $in1,$in1,$in1,$inpperm
+ lvx_u $in3,$x30,$inp
+ vncipherlast $out5,$out5,$in4
+ le?vperm $in2,$in2,$in2,$inpperm
+ lvx_u $in4,$x40,$inp
+ vncipherlast $out6,$out6,$in5
+ le?vperm $in3,$in3,$in3,$inpperm
+ lvx_u $in5,$x50,$inp
+ vncipherlast $out7,$out7,$in6
+ le?vperm $in4,$in4,$in4,$inpperm
+ lvx_u $in6,$x60,$inp
+ vmr $ivec,$in7
+ le?vperm $in5,$in5,$in5,$inpperm
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $in6,$in6,$in6,$inpperm
+ vxor $out0,$in0,$rndkey0
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $in7,$in7,$in7,$inpperm
+ vxor $out1,$in1,$rndkey0
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$rndkey0
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$rndkey0
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$rndkey0
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ vxor $out5,$in5,$rndkey0
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x60,$out
+ vxor $out6,$in6,$rndkey0
+ stvx_u $out7,$x70,$out
+ addi $out,$out,0x80
+ vxor $out7,$in7,$rndkey0
+
+ mtctr $rounds
+ beq Loop_cbc_dec8x # did $len-=128 borrow?
+
+ addic. $len,$len,128
+ beq Lcbc_dec8x_done
+ nop
+ nop
+
+Loop_cbc_dec8x_tail: # up to 7 "words" tail...
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_cbc_dec8x_tail
+
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ vncipher $out6,$out6,v24
+ vncipher $out7,$out7,v24
+
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ vncipher $out6,$out6,v25
+ vncipher $out7,$out7,v25
+
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+ vncipher $out6,$out6,v26
+ vncipher $out7,$out7,v26
+
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+ vncipher $out6,$out6,v27
+ vncipher $out7,$out7,v27
+
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ vncipher $out6,$out6,v28
+ vncipher $out7,$out7,v28
+
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ vncipher $out6,$out6,v29
+ vncipher $out7,$out7,v29
+
+ vncipher $out1,$out1,v30
+ vxor $ivec,$ivec,v31 # last round key
+ vncipher $out2,$out2,v30
+ vxor $in1,$in1,v31
+ vncipher $out3,$out3,v30
+ vxor $in2,$in2,v31
+ vncipher $out4,$out4,v30
+ vxor $in3,$in3,v31
+ vncipher $out5,$out5,v30
+ vxor $in4,$in4,v31
+ vncipher $out6,$out6,v30
+ vxor $in5,$in5,v31
+ vncipher $out7,$out7,v30
+ vxor $in6,$in6,v31
+
+ cmplwi $len,32 # switch($len)
+ blt Lcbc_dec8x_one
+ nop
+ beq Lcbc_dec8x_two
+ cmplwi $len,64
+ blt Lcbc_dec8x_three
+ nop
+ beq Lcbc_dec8x_four
+ cmplwi $len,96
+ blt Lcbc_dec8x_five
+ nop
+ beq Lcbc_dec8x_six
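+	# (a reader's map, not original text -- $len here is the tail in
+	#  bytes: 16->_one, 32->_two, 48->_three, 64->_four, 80->_five,
+	#  96->_six, 112->_seven)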
+
+Lcbc_dec8x_seven:
+ vncipherlast $out1,$out1,$ivec
+ vncipherlast $out2,$out2,$in1
+ vncipherlast $out3,$out3,$in2
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out1,$out1,$out1,$inpperm
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x00,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x10,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x20,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x30,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x40,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x50,$out
+ stvx_u $out7,$x60,$out
+ addi $out,$out,0x70
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_six:
+ vncipherlast $out2,$out2,$ivec
+ vncipherlast $out3,$out3,$in2
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out2,$out2,$out2,$inpperm
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x00,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x10,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x20,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x30,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x40,$out
+ stvx_u $out7,$x50,$out
+ addi $out,$out,0x60
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_five:
+ vncipherlast $out3,$out3,$ivec
+ vncipherlast $out4,$out4,$in3
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out3,$out3,$out3,$inpperm
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x00,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x10,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x20,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x30,$out
+ stvx_u $out7,$x40,$out
+ addi $out,$out,0x50
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_four:
+ vncipherlast $out4,$out4,$ivec
+ vncipherlast $out5,$out5,$in4
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out4,$out4,$out4,$inpperm
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x00,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x10,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x20,$out
+ stvx_u $out7,$x30,$out
+ addi $out,$out,0x40
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_three:
+ vncipherlast $out5,$out5,$ivec
+ vncipherlast $out6,$out6,$in5
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out5,$out5,$out5,$inpperm
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x00,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x10,$out
+ stvx_u $out7,$x20,$out
+ addi $out,$out,0x30
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_two:
+ vncipherlast $out6,$out6,$ivec
+ vncipherlast $out7,$out7,$in6
+ vmr $ivec,$in7
+
+ le?vperm $out6,$out6,$out6,$inpperm
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x00,$out
+ stvx_u $out7,$x10,$out
+ addi $out,$out,0x20
+ b Lcbc_dec8x_done
+
+.align 5
+Lcbc_dec8x_one:
+ vncipherlast $out7,$out7,$ivec
+ vmr $ivec,$in7
+
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out7,0,$out
+ addi $out,$out,0x10
+
+Lcbc_dec8x_done:
+ le?vperm $ivec,$ivec,$ivec,$inpperm
+ stvx_u $ivec,0,$ivp # write [unaligned] iv
+
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $inpperm,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
+___
+}} }}}
+
+#########################################################################
+{{{ # CTR procedure[s] #
+
+####################### WARNING: Here be dragons! #######################
+#
+# This code is written as 'ctr32', based on a 32-bit counter used
+# upstream. The kernel does *not* use a 32-bit counter. The kernel uses
+# a 128-bit counter.
+#
+# This leads to subtle changes from the upstream code: the counter
+# is incremented with vadduqm rather than vadduwm. This occurs in
+# both the bulk (8 blocks at a time) path and in the individual block
+# path. Be aware of this when doing updates.
+#
+# See:
+# 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug")
+# 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword")
+# https://github.com/openssl/openssl/pull/8942
+#
+#########################################################################
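+# A Python sketch of the difference (illustrative only; the function
+# names are assumptions).  Upstream's ctr32 wraps only the low 32 bits;
+# the kernel requires the whole 128-bit block to carry, which is what
+# vadduqm provides:
+#
+#     def inc_ctr32(iv: bytes) -> bytes:          # upstream semantics
+#         lo = (int.from_bytes(iv[12:], "big") + 1) & 0xffffffff
+#         return iv[:12] + lo.to_bytes(4, "big")
+#
+#     def inc_ctr128(iv: bytes) -> bytes:         # kernel semantics
+#         n = (int.from_bytes(iv, "big") + 1) % (1 << 128)
+#         return n.to_bytes(16, "big")
+#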
+my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
+ map("v$_",(4..11));
+my $dat=$tmp;
+
+$code.=<<___;
+.globl .${prefix}_ctr32_encrypt_blocks
+ ${UCMP}i $len,1
+ bltlr-
+
+ lis r0,0xfff0
+ mfspr $vrsave,256
+ mtspr 256,r0
+
+ li $idx,15
+ vxor $rndkey0,$rndkey0,$rndkey0
+ le?vspltisb $tmp,0x0f
+
+ lvx $ivec,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ vspltisb $one,1
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $ivec,$ivec,$inptail,$inpperm
+ vsldoi $one,$rndkey0,$one,1
+
+ neg r11,$inp
+ ?lvsl $keyperm,0,$key # prepare for unaligned key
+ lwz $rounds,240($key)
+
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inptail,0,$inp
+	addi	$inp,$inp,15		# 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ srwi $rounds,$rounds,1
+ li $idx,16
+ subi $rounds,$rounds,1
+
+ ${UCMP}i $len,8
+ bge _aesp8_ctr32_encrypt8x
+
+ ?lvsr $outperm,0,$out # prepare for unaligned store
+ vspltisb $outmask,-1
+ lvx $outhead,0,$out
+ ?vperm $outmask,$rndkey0,$outmask,$outperm
+ le?vxor $outperm,$outperm,$tmp
+
+ lvx $rndkey0,0,$key
+ mtctr $rounds
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$ivec,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ b Loop_ctr32_enc
+
+.align 5
+Loop_ctr32_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ bdnz Loop_ctr32_enc
+
+ vadduqm $ivec,$ivec,$one # Kernel change for 128-bit
+ vmr $dat,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ subic. $len,$len,1 # blocks--
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key
+ vperm $dat,$dat,$inptail,$inpperm
+ li $idx,16
+ ?vperm $rndkey1,$rndkey0,$rndkey1,$keyperm
+ lvx $rndkey0,0,$key
+ vxor $dat,$dat,$rndkey1 # last round key
+ vcipherlast $inout,$inout,$dat
+
+ lvx $rndkey1,$idx,$key
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inout,$outperm
+ vsel $dat,$outhead,$inout,$outmask
+ mtctr $rounds
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vmr $outhead,$inout
+ vxor $inout,$ivec,$rndkey0
+ lvx $rndkey0,$idx,$key
+ addi $idx,$idx,16
+ stvx $dat,0,$out
+ addi $out,$out,16
+ bne Loop_ctr32_enc
+
+ addi $out,$out,-1
+ lvx $inout,0,$out # redundant in aligned case
+ vsel $inout,$outhead,$inout,$outmask
+ stvx $inout,0,$out
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,6,0
+ .long 0
+___
+#########################################################################
+{{ # Optimized CTR procedure #
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller" registers; assignment is redundant
+my ($two,$three,$four)=($outhead,$outperm,$outmask);
+
+$code.=<<___;
+.align 5
+_aesp8_ctr32_encrypt8x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ li r10,`$FRAME+8*16+15`
+ li r11,`$FRAME+8*16+31`
+ stvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key # load key schedule
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ lvx v31,$x00,$key
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_ctr32_enc_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key
+ addi $key,$key,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_ctr32_enc_key
+
+ lvx v26,$x10,$key
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key
+ ?vperm v29,v29,v30,$keyperm
+ lvx $out0,$x70,$key # borrow $out0
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$out0,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vadduqm $two,$one,$one
+ subi $inp,$inp,15 # undo "caller"
+ $SHL $len,$len,4
+
+ vadduqm $out1,$ivec,$one # counter values ...
+ vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit)
+ vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
+ le?li $idx,8
+ vadduqm $out3,$out1,$two
+ vxor $out1,$out1,$rndkey0
+ le?lvsl $inpperm,0,$idx
+ vadduqm $out4,$out2,$two
+ vxor $out2,$out2,$rndkey0
+ le?vspltisb $tmp,0x0f
+ vadduqm $out5,$out3,$two
+ vxor $out3,$out3,$rndkey0
+ le?vxor $inpperm,$inpperm,$tmp # transform for lvx_u/stvx_u
+ vadduqm $out6,$out4,$two
+ vxor $out4,$out4,$rndkey0
+ vadduqm $out7,$out5,$two
+ vxor $out5,$out5,$rndkey0
+ vadduqm $ivec,$out6,$two # next counter value
+ vxor $out6,$out6,$rndkey0
+ vxor $out7,$out7,$rndkey0
+
+ mtctr $rounds
+ b Loop_ctr32_enc8x
+.align 5
+Loop_ctr32_enc8x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ vcipher $out6,$out6,v24
+ vcipher $out7,$out7,v24
+Loop_ctr32_enc8x_middle:
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ vcipher $out6,$out6,v25
+ vcipher $out7,$out7,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_ctr32_enc8x
+
+ subic r11,$len,256 # $len-256, borrow $key_
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ vcipher $out6,$out6,v24
+ vcipher $out7,$out7,v24
+
+ subfe r0,r0,r0 # borrow?-1:0
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ vcipher $out6,$out6,v25
+ vcipher $out7,$out7,v25
+
+ and r0,r0,r11
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v26
+ vcipher $out1,$out1,v26
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vcipher $out4,$out4,v26
+ vcipher $out5,$out5,v26
+ vcipher $out6,$out6,v26
+ vcipher $out7,$out7,v26
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ subic $len,$len,129 # $len-=129
+ vcipher $out0,$out0,v27
+ addi $len,$len,1 # $len-=128 really
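+					# (annotation, not original text:
+					#  subtracting 129 sets CA iff
+					#  $len >= 129, i.e. at least one
+					#  byte follows the next 128; the
+					#  addi restores the net -128)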
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vcipher $out4,$out4,v27
+ vcipher $out5,$out5,v27
+ vcipher $out6,$out6,v27
+ vcipher $out7,$out7,v27
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+
+ vcipher $out0,$out0,v28
+ lvx_u $in0,$x00,$inp # load input
+ vcipher $out1,$out1,v28
+ lvx_u $in1,$x10,$inp
+ vcipher $out2,$out2,v28
+ lvx_u $in2,$x20,$inp
+ vcipher $out3,$out3,v28
+ lvx_u $in3,$x30,$inp
+ vcipher $out4,$out4,v28
+ lvx_u $in4,$x40,$inp
+ vcipher $out5,$out5,v28
+ lvx_u $in5,$x50,$inp
+ vcipher $out6,$out6,v28
+ lvx_u $in6,$x60,$inp
+ vcipher $out7,$out7,v28
+ lvx_u $in7,$x70,$inp
+ addi $inp,$inp,0x80
+
+ vcipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$inpperm
+ vcipher $out1,$out1,v29
+ le?vperm $in1,$in1,$in1,$inpperm
+ vcipher $out2,$out2,v29
+ le?vperm $in2,$in2,$in2,$inpperm
+ vcipher $out3,$out3,v29
+ le?vperm $in3,$in3,$in3,$inpperm
+ vcipher $out4,$out4,v29
+ le?vperm $in4,$in4,$in4,$inpperm
+ vcipher $out5,$out5,v29
+ le?vperm $in5,$in5,$in5,$inpperm
+ vcipher $out6,$out6,v29
+ le?vperm $in6,$in6,$in6,$inpperm
+ vcipher $out7,$out7,v29
+ le?vperm $in7,$in7,$in7,$inpperm
+
+	add	$inp,$inp,r0		# $inp is adjusted in such a
+ # way that at exit from the
+ # loop inX-in7 are loaded
+ # with last "words"
+ subfe. r0,r0,r0 # borrow?-1:0
+ vcipher $out0,$out0,v30
+ vxor $in0,$in0,v31 # xor with last round key
+ vcipher $out1,$out1,v30
+ vxor $in1,$in1,v31
+ vcipher $out2,$out2,v30
+ vxor $in2,$in2,v31
+ vcipher $out3,$out3,v30
+ vxor $in3,$in3,v31
+ vcipher $out4,$out4,v30
+ vxor $in4,$in4,v31
+ vcipher $out5,$out5,v30
+ vxor $in5,$in5,v31
+ vcipher $out6,$out6,v30
+ vxor $in6,$in6,v31
+ vcipher $out7,$out7,v30
+ vxor $in7,$in7,v31
+
+ bne Lctr32_enc8x_break # did $len-129 borrow?
+
+ vcipherlast $in0,$out0,$in0
+ vcipherlast $in1,$out1,$in1
+ vadduqm $out1,$ivec,$one # counter values ...
+ vcipherlast $in2,$out2,$in2
+ vadduqm $out2,$ivec,$two
+ vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0]
+ vcipherlast $in3,$out3,$in3
+ vadduqm $out3,$out1,$two
+ vxor $out1,$out1,$rndkey0
+ vcipherlast $in4,$out4,$in4
+ vadduqm $out4,$out2,$two
+ vxor $out2,$out2,$rndkey0
+ vcipherlast $in5,$out5,$in5
+ vadduqm $out5,$out3,$two
+ vxor $out3,$out3,$rndkey0
+ vcipherlast $in6,$out6,$in6
+ vadduqm $out6,$out4,$two
+ vxor $out4,$out4,$rndkey0
+ vcipherlast $in7,$out7,$in7
+ vadduqm $out7,$out5,$two
+ vxor $out5,$out5,$rndkey0
+ le?vperm $in0,$in0,$in0,$inpperm
+ vadduqm $ivec,$out6,$two # next counter value
+ vxor $out6,$out6,$rndkey0
+ le?vperm $in1,$in1,$in1,$inpperm
+ vxor $out7,$out7,$rndkey0
+ mtctr $rounds
+
+ vcipher $out0,$out0,v24
+ stvx_u $in0,$x00,$out
+ le?vperm $in2,$in2,$in2,$inpperm
+ vcipher $out1,$out1,v24
+ stvx_u $in1,$x10,$out
+ le?vperm $in3,$in3,$in3,$inpperm
+ vcipher $out2,$out2,v24
+ stvx_u $in2,$x20,$out
+ le?vperm $in4,$in4,$in4,$inpperm
+ vcipher $out3,$out3,v24
+ stvx_u $in3,$x30,$out
+ le?vperm $in5,$in5,$in5,$inpperm
+ vcipher $out4,$out4,v24
+ stvx_u $in4,$x40,$out
+ le?vperm $in6,$in6,$in6,$inpperm
+ vcipher $out5,$out5,v24
+ stvx_u $in5,$x50,$out
+ le?vperm $in7,$in7,$in7,$inpperm
+ vcipher $out6,$out6,v24
+ stvx_u $in6,$x60,$out
+ vcipher $out7,$out7,v24
+ stvx_u $in7,$x70,$out
+ addi $out,$out,0x80
+
+ b Loop_ctr32_enc8x_middle
+
+.align 5
+Lctr32_enc8x_break:
+ cmpwi $len,-0x60
+ blt Lctr32_enc8x_one
+ nop
+ beq Lctr32_enc8x_two
+ cmpwi $len,-0x40
+ blt Lctr32_enc8x_three
+ nop
+ beq Lctr32_enc8x_four
+ cmpwi $len,-0x20
+ blt Lctr32_enc8x_five
+ nop
+ beq Lctr32_enc8x_six
+ cmpwi $len,0x00
+ blt Lctr32_enc8x_seven
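+	# (a reader's map, not original text -- $len was biased by -128, so
+	#  the tail cases compare against negatives: -0x70->_one, -0x60->_two,
+	#  -0x50->_three, -0x40->_four, -0x30->_five, -0x20->_six,
+	#  -0x10->_seven, 0 falls through to _eight)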
+
+Lctr32_enc8x_eight:
+ vcipherlast $out0,$out0,$in0
+ vcipherlast $out1,$out1,$in1
+ vcipherlast $out2,$out2,$in2
+ vcipherlast $out3,$out3,$in3
+ vcipherlast $out4,$out4,$in4
+ vcipherlast $out5,$out5,$in5
+ vcipherlast $out6,$out6,$in6
+ vcipherlast $out7,$out7,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ le?vperm $out7,$out7,$out7,$inpperm
+ stvx_u $out6,$x60,$out
+ stvx_u $out7,$x70,$out
+ addi $out,$out,0x80
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_seven:
+ vcipherlast $out0,$out0,$in1
+ vcipherlast $out1,$out1,$in2
+ vcipherlast $out2,$out2,$in3
+ vcipherlast $out3,$out3,$in4
+ vcipherlast $out4,$out4,$in5
+ vcipherlast $out5,$out5,$in6
+ vcipherlast $out6,$out6,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ le?vperm $out6,$out6,$out6,$inpperm
+ stvx_u $out5,$x50,$out
+ stvx_u $out6,$x60,$out
+ addi $out,$out,0x70
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_six:
+ vcipherlast $out0,$out0,$in2
+ vcipherlast $out1,$out1,$in3
+ vcipherlast $out2,$out2,$in4
+ vcipherlast $out3,$out3,$in5
+ vcipherlast $out4,$out4,$in6
+ vcipherlast $out5,$out5,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ le?vperm $out5,$out5,$out5,$inpperm
+ stvx_u $out4,$x40,$out
+ stvx_u $out5,$x50,$out
+ addi $out,$out,0x60
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_five:
+ vcipherlast $out0,$out0,$in3
+ vcipherlast $out1,$out1,$in4
+ vcipherlast $out2,$out2,$in5
+ vcipherlast $out3,$out3,$in6
+ vcipherlast $out4,$out4,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$inpperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_four:
+ vcipherlast $out0,$out0,$in4
+ vcipherlast $out1,$out1,$in5
+ vcipherlast $out2,$out2,$in6
+ vcipherlast $out3,$out3,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$inpperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_three:
+ vcipherlast $out0,$out0,$in5
+ vcipherlast $out1,$out1,$in6
+ vcipherlast $out2,$out2,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ le?vperm $out2,$out2,$out2,$inpperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_two:
+ vcipherlast $out0,$out0,$in6
+ vcipherlast $out1,$out1,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ le?vperm $out1,$out1,$out1,$inpperm
+ stvx_u $out0,$x00,$out
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ b Lctr32_enc8x_done
+
+.align 5
+Lctr32_enc8x_one:
+ vcipherlast $out0,$out0,$in7
+
+ le?vperm $out0,$out0,$out0,$inpperm
+ stvx_u $out0,0,$out
+ addi $out,$out,0x10
+
+Lctr32_enc8x_done:
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $inpperm,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+ stvx $inpperm,r10,$sp
+ addi r10,r10,32
+ stvx $inpperm,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
+___
+}} }}}
+
+#########################################################################
+{{{ # XTS procedures #
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, #
+# const AES_KEY *key1, const AES_KEY *key2, #
+# [const] unsigned char iv[16]); #
+# If $key2 is NULL, a "tweak chaining" mode is engaged, in which the   #
+# input tweak value is assumed to be encrypted already, and the last   #
+# tweak value, suitable for a consecutive call on the same chunk of    #
+# data, is written back to the original buffer. In addition, in        #
+# "tweak chaining" mode only complete input blocks are processed.      #
+
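+# A Python sketch of the per-block operation described above
+# (illustrative only; 'encrypt_block' and all names are assumptions --
+# ciphertext stealing for partial tails is omitted):
+#
+#     def xts_encrypt_block(encrypt_block, block, tweak):
+#         tmp = bytes(a ^ b for a, b in zip(block, tweak))
+#         return bytes(a ^ b for a, b in zip(encrypt_block(tmp), tweak))
+#
+# With $key2 == NULL the caller passes in an already-encrypted tweak and
+# receives the advanced tweak back, so one large request can be split
+# across consecutive calls on the same data stream.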
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12));
+my $taillen = $key2;
+
+ ($inp,$idx) = ($idx,$inp); # reassign
+
+$code.=<<___;
+.globl .${prefix}_xts_encrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff0
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+	addi	$inp,$inp,15		# 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_enc_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_enc
+
+Lxts_enc_no_key2:
+ li $idx,-16
+ and $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_enc:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
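+	# (the tweak update used below is multiplication by x in GF(2^128)
+	#  modulo x^128 + x^7 + x^2 + x + 1; Python sketch, illustrative
+	#  only:
+	#    def next_tweak(t: int) -> int:    # 128-bit tweak as integer
+	#        carry = t >> 127
+	#        return ((t << 1) & ((1 << 128) - 1)) ^ (0x87 * carry)
+	#  vaddubm doubles each byte while vsrab/vsldoi/vand/vxor with the
+	#  0x870101..01 constant propagate the per-byte carries)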
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_encrypt6x
+
+ andi. $taillen,$len,15
+ subic r0,$len,32
+ subi $taillen,$taillen,16
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+ b Loop_xts_enc
+
+.align 5
+Loop_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vcipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_enc_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ subic r0,$len,32
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $output,$output,$rndkey0 # just in case $len<16
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_enc
+
+ vxor $output,$output,$tweak
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ subi r11,$out,17
+ subi $out,$out,16
+ mtctr $len
+ li $len,16
+Loop_xts_enc_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_enc_steal
+
+ mtctr $rounds
+ b Loop_xts_enc # one more time...
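+	# (Loop_xts_enc_steal above is standard ciphertext stealing: the
+	#  leading $taillen bytes of the just-written ciphertext block are
+	#  copied out as the short final block, and the partial input plus
+	#  the leftover ciphertext bytes go through the cipher once more
+	#  into the vacated slot.  Python sketch, illustrative only:
+	#    cc = last_full_ciphertext
+	#    short_final = cc[:taillen]
+	#    new_last = xts_encrypt_block(E, tail + cc[taillen:], next_tweak)
+	#  )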
+
+Lxts_enc_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
+
+.globl .${prefix}_xts_decrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff8
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ andi. r0,$len,15
+ neg r0,r0
+ andi. r0,r0,16
+ sub $len,$len,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+	addi	$inp,$inp,15		# 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_dec_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_dec
+
+Lxts_dec_no_key2:
+ neg $idx,$len
+ andi. $idx,$idx,15
+ add $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_dec:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_decrypt6x
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+
+ ${UCMP}i $len,16
+ blt Ltail_xts_dec
+ be?b Loop_xts_dec
+
+.align 5
+Loop_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_dec_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_dec
+
+Ltail_xts_dec:
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak1,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak1,$tweak1,$tmp
+
+ subi $inp,$inp,16
+ add $inp,$inp,$len
+
+ vxor $inout,$inout,$tweak # :-(
+ vxor $inout,$inout,$tweak1 # :-)
+
+Loop_xts_dec_short:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec_short
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak1
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ #addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ vxor $rndkey0,$rndkey0,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ subi r11,$out,1
+ mtctr $len
+ li $len,16
+Loop_xts_dec_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_dec_steal
+
+ mtctr $rounds
+ b Loop_xts_dec # one more time...
+
+Lxts_dec_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{ # Optimized XTS procedures #
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+ $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($keyperm)=($out0);			# aliases with "caller" registers; assignment is redundant
+my $taillen=$x70;
+
+$code.=<<___;
+.align 5
+_aesp8_xts_encrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ xxlor 2, 32+$eighty7, 32+$eighty7
+ vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
+ xxlor 1, 32+$eighty7, 32+$eighty7
+
+ # Load XOR Lconsts.
+ mr $x70, r6
+ bl Lconsts
+ lxvw4x 0, $x40, r6 # load XOR contents
+ mr r6, $x70
+ li $x70,0x70
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_enc_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_enc_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+	# Switch to the following code sequence, using 0x010101..87, to generate the tweak.
+ # eighty7 = 0x010101..87
+ # vsrab tmp, tweak, seven # next tweak value, right shift 7 bits
+ # vand tmp, tmp, eighty7 # last byte with carry
+ # vaddubm tweak, tweak, tweak # left shift 1 bit (x2)
+ # xxlor vsx, 0, 0
+ # vpermxor tweak, tweak, tmp, vsx
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_enc6x
+
+.align 5
+Loop_xts_enc6x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc6x
+
+ xxlor 32+$eighty7, 1, 1 # 0x010101..87
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out0,$out0,v26
+ vcipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
+ vcipher $out4,$out4,v26
+ vcipher $out5,$out5,v26
+
+	add	$inp,$inp,r0		# $inp is adjusted in such a
+ # way that at exit from the
+ # loop inX-in5 are loaded
+ # with last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out0,$out0,v27
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vcipher $out4,$out4,v27
+ vcipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out4,$out4,v28
+ vcipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vcipher $out0,$out0,v29
+ vcipher $out1,$out1,v29
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vcipher $out4,$out4,v29
+ vcipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+
+ vcipher $out0,$out0,v30
+ vcipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
+ vcipher $out4,$out4,v30
+ vcipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vcipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vcipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vcipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vcipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vcipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ xxlor 10, 32+$in0, 32+$in0
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+ xxlor 32+$in0, 10, 10
+ vcipherlast $tmp,$out5,$in5 # last block might be needed
+ # in stealing mode
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$tmp,$tmp,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ le?stvx_u $out5,$x50,$out
+ be?stvx_u $tmp, $x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_enc6x # did $len-=96 borrow?
+
+ xxlor 32+$eighty7, 2, 2 # 0x010101..87
+
+ addic. $len,$len,0x60
+ beq Lxts_enc6x_zero
+ cmpwi $len,0x20
+ blt Lxts_enc6x_one
+ nop
+ beq Lxts_enc6x_two
+ cmpwi $len,0x40
+ blt Lxts_enc6x_three
+ nop
+ beq Lxts_enc6x_four
+
+Lxts_enc6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $tmp,$out4,$twk5 # last block prep for stealing
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $tmp,$out3,$twk4 # last block prep for stealing
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $tmp,$out2,$twk3 # last block prep for stealing
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vxor $tmp,$out1,$twk2 # last block prep for stealing
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_enc1x:
+ vcipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc1x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+
+ lvsr $inpperm,0,$taillen
+ vcipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vcipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vcipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vcipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vxor $tmp,$out0,$twk1 # last block prep for stealing
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_enc6x_done
+
+ add $inp,$inp,$taillen
+ subi $inp,$inp,16
+ lvx_u $in0,0,$inp
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ le?vperm $in0,$in0,$in0,$leperm
+ vperm $in0,$in0,$in0,$inpperm
+ vxor $tmp,$tmp,$twk0
+Lxts_enc6x_steal:
+ vxor $in0,$in0,$twk0
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember?
+
+ subi r30,$out,17
+ subi $out,$out,16
+ mtctr $taillen
+Loop_xts_enc6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_enc6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_enc1x # one more time...
+
+.align 4
+Lxts_enc6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_enc5x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_enc5x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ vcipher $out0,$out0,v26
+ lvsr $inpperm,r0,$taillen # $in5 is no more
+ vcipher $out1,$out1,v26
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vcipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vcipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vcipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vcipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vcipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out1,$out1,v29
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vcipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vcipher $out0,$out0,v30
+ vperm $in0,$in0,$in0,$inpperm
+ vcipher $out1,$out1,v30
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ vcipher $out4,$out4,v30
+
+ vcipherlast $out0,$out0,$twk0
+ vcipherlast $out1,$out1,$in1
+ vcipherlast $out2,$out2,$in2
+ vcipherlast $out3,$out3,$in3
+ vcipherlast $out4,$out4,$in4
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+
+.align 5
+_aesp8_xts_decrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ xxlor 2, 32+$eighty7, 32+$eighty7
+ vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
+ xxlor 1, 32+$eighty7, 32+$eighty7
+
+ # Load XOR Lconsts.
+ mr $x70, r6
+ bl Lconsts
+ lxvw4x 0, $x40, r6 # load XOR contents
+ mr r6, $x70
+ li $x70,0x70
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_dec_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_dec_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_dec6x
+
+.align 5
+Loop_xts_dec6x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec6x
+
+ xxlor 32+$eighty7, 1, 1 # 0x010101..87
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # way that at exit from the
+ # loop inX-in5 are loaded
+ # with last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out0,$out0,v27
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vncipher $out0,$out0,v29
+ vncipher $out1,$out1,v29
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
+ vncipher $out4,$out4,v30
+ vncipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vncipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vncipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vncipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vncipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vncipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ xxlor 10, 32+$in0, 32+$in0
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+ xxlor 32+$in0, 10, 10
+ vncipherlast $out5,$out5,$in5
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$out5,$out5,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ stvx_u $out5,$x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_dec6x # did $len-=96 borrow?
+
+ xxlor 32+$eighty7, 2, 2 # 0x010101..87
+
+ addic. $len,$len,0x60
+ beq Lxts_dec6x_zero
+ cmpwi $len,0x20
+ blt Lxts_dec6x_one
+ nop
+ beq Lxts_dec6x_two
+ cmpwi $len,0x40
+ blt Lxts_dec6x_three
+ nop
+ beq Lxts_dec6x_four
+
+Lxts_dec6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ vxor $twk1,$tweak,$rndkey0
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk1
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ vmr $twk1,$twk5
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk5
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ vmr $twk1,$twk4
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk4
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vmr $twk1,$twk3
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk3
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_dec1x:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec1x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ mtctr $rounds
+ vncipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vmr $twk1,$twk2
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ vxor $out0,$in0,$twk2
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_dec6x_done
+
+ lvx_u $in0,0,$inp
+ le?vperm $in0,$in0,$in0,$leperm
+ vxor $out0,$in0,$twk1
+Lxts_dec6x_steal:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Lxts_dec6x_steal
+
+ add $inp,$inp,$taillen
+ vncipher $out0,$out0,v24
+
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v26
+
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk1,$twk1,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vncipherlast $tmp,$out0,$twk1
+
+ le?vperm $out0,$tmp,$tmp,$leperm
+ le?stvx_u $out0,0,$out
+ be?stvx_u $tmp,0,$out
+
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0
+ vxor $out0,$out0,$twk0
+
+ subi r30,$out,1
+ mtctr $taillen
+Loop_xts_dec6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_dec6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_dec1x # one more time...
+
+.align 4
+Lxts_dec6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_dec5x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_dec5x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vncipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vncipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ vncipher $out4,$out4,v30
+
+ vncipherlast $out0,$out0,$twk0
+ vncipherlast $out1,$out1,$in1
+ vncipherlast $out2,$out2,$in2
+ vncipherlast $out3,$out3,$in3
+ vncipherlast $out4,$out4,$in4
+ mtctr $rounds
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+___
+}} }}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/geo;
+
+ # constants table endian-specific conversion
+ if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+ my $conv=$3;
+ my @bytes=();
+
+ # convert to endian-agnostic format
+ if ($1 eq "long") {
+ foreach (split(/,\s*/,$2)) {
+ my $l = /^0/?oct:int;
+ push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+ }
+ } else {
+ @bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+ }
+
+ # little-endian conversion
+ if ($flavour =~ /le$/o) {
+ SWITCH: for($conv) {
+ /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
+ /\?rev/ && do { @bytes=reverse(@bytes); last; };
+ }
+ }
+
+ #emit
+ print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+ next;
+ }
+ $consts=0 if (m/Lconsts:/o); # end of table
+
+ # instructions prefixed with '?' are endian-specific and need
+ # to be adjusted accordingly...
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o or
+ s/\?lvsr/lvsl/o or
+ s/\?lvsl/lvsr/o or
+ s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+ s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+ s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+ } else { # big-endian
+ s/le\?/#le#/o or
+ s/be\?//o or
+ s/\?([a-z]+)/$1/o;
+ }
+
+ print $_,"\n";
+}
+
+close STDOUT;
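
Throughout the XTS code above, every "next tweak value" comment marks the same idiom: vaddubm doubles each byte of the tweak, vsrab/vand recover the per-byte carries from the 0x010101..87 constant, and the closing vxor (vpermxor on the ISA 3.1 path) applies them together with the GF(2^128) reduction byte 0x87. The net effect is multiplying the 128-bit tweak by x modulo x^128 + x^7 + x^2 + x + 1. A minimal C sketch of that update, assuming a two-limb little-endian representation chosen purely for illustration:

	#include <stdint.h>

	/* Multiply an XTS tweak by x in GF(2^128), reduction polynomial
	 * x^128 + x^7 + x^2 + x + 1 (the 0x87 in $eighty7). */
	static void xts_mul_x(uint64_t t[2])
	{
		uint64_t carry = t[1] >> 63;		/* bit shifted out of the top */

		t[1] = (t[1] << 1) | (t[0] >> 63);	/* 128-bit left shift */
		t[0] <<= 1;
		t[0] ^= carry * 0x87;			/* conditional reduction */
	}

Calling xts_mul_x() once per block reproduces the tweak schedule that the assembly interleaves with the cipher rounds, six blocks at a time.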
diff --git a/arch/powerpc/crypto/ghash.c b/arch/powerpc/crypto/ghash.c
new file mode 100644
index 0000000000..77eca20bc7
--- /dev/null
+++ b/arch/powerpc/crypto/ghash.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015, 2019 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ *
+ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
+ * mechanism. The new approach is based on arm64 code, which is:
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/ghash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/b128ops.h>
+#include "aesp8-ppc.h"
+
+void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
+void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
+void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
+ const u8 *in, size_t len);
+
+struct p8_ghash_ctx {
+ /* key used by vector asm */
+ u128 htable[16];
+ /* key used by software fallback */
+ be128 key;
+};
+
+struct p8_ghash_desc_ctx {
+ u64 shash[2];
+ u8 buffer[GHASH_DIGEST_SIZE];
+ int bytes;
+};
+
+static int p8_ghash_init(struct shash_desc *desc)
+{
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->bytes = 0;
+ memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+ return 0;
+}
+
+static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+
+ if (keylen != GHASH_BLOCK_SIZE)
+ return -EINVAL;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_init_p8(ctx->htable, (const u64 *) key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static inline void __ghash_block(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx)
+{
+ if (crypto_simd_usable()) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ }
+}
+
+static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (crypto_simd_usable()) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ src, srclen);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ srclen -= GHASH_BLOCK_SIZE;
+ src += GHASH_BLOCK_SIZE;
+ }
+ }
+}
+
+static int p8_ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ unsigned int len;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src,
+ srclen);
+ dctx->bytes += srclen;
+ return 0;
+ }
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+
+ __ghash_block(ctx, dctx);
+
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
+ __ghash_blocks(ctx, dctx, src, len);
+ src += len;
+ srclen -= len;
+ }
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
+}
+
+static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+{
+ int i;
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
+ __ghash_block(ctx, dctx);
+ dctx->bytes = 0;
+ }
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
+}
+
+struct shash_alg p8_ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = p8_ghash_init,
+ .update = p8_ghash_update,
+ .final = p8_ghash_final,
+ .setkey = p8_ghash_setkey,
+ .descsize = sizeof(struct p8_ghash_desc_ctx)
+ + sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
+ .cra_priority = 1000,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+ .cra_module = THIS_MODULE,
+ },
+};
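
A hedged usage sketch, not part of the patch: once p8_ghash_alg is registered, its cra_priority of 1000 makes this driver win whenever kernel code asks the crypto API for "ghash" on VMX-capable hardware. Exercising it needs nothing beyond the standard shash interface:

	#include <crypto/hash.h>

	static int ghash_digest_example(const u8 key[16], const u8 *data,
					unsigned int len, u8 out[16])
	{
		struct crypto_shash *tfm;
		int ret;

		tfm = crypto_alloc_shash("ghash", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_shash_setkey(tfm, key, 16);
		if (!ret) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			ret = crypto_shash_init(desc) ?:
			      crypto_shash_update(desc, data, len) ?:
			      crypto_shash_final(desc, out);
		}
		crypto_free_shash(tfm);
		return ret;
	}

The fallback paths in __ghash_block()/__ghash_blocks() mean the same call sequence stays correct even when crypto_simd_usable() is false, e.g. in hard-interrupt context.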
diff --git a/arch/powerpc/crypto/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp8-ppc.pl
new file mode 100644
index 0000000000..041e633c21
--- /dev/null
+++ b/arch/powerpc/crypto/ghashp8-ppc.pl
@@ -0,0 +1,243 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
+
+# This code is taken from the OpenSSL project but the author (Andy Polyakov)
+# has relicensed it under the GPLv2. Therefore this program is free software;
+# you can redistribute it and/or modify it under the terms of the GNU General
+# Public License version 2 as published by the Free Software Foundation.
+#
+# The original headers, including the original license headers, are
+# included below for completeness.
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see https://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# GHASH for PowerISA v2.07.
+#
+# July 2014
+#
+# Accurate performance measurements are problematic, because the
+# setup is always virtualized, with a possibly throttled processor.
+# Relative comparison is therefore more informative. This initial
+# version is ~2.1x slower than hardware-assisted AES-128-CTR and ~12x
+# faster than "4-bit" integer-only compiler-generated 64-bit code.
+# "Initial version" means that there is room for further improvement.
+
+$flavour=shift;
+$output =shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $LRSAVE=2*$SIZE_T;
+ $STU="stdu";
+ $POP="ld";
+ $PUSH="std";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $LRSAVE=$SIZE_T;
+ $STU="stwu";
+ $POP="lwz";
+ $PUSH="stw";
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output" or die "can't call $xlate: $!";
+
+my ($Xip,$Htbl,$inp,$len)=map("r$_",(3..6)); # argument block
+
+my ($Xl,$Xm,$Xh,$IN)=map("v$_",(0..3));
+my ($zero,$t0,$t1,$t2,$xC2,$H,$Hh,$Hl,$lemask)=map("v$_",(4..12));
+my $vrsave="r12";
+
+$code=<<___;
+.machine "any"
+
+.text
+
+.globl .gcm_init_p8
+ lis r0,0xfff0
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $H,0,r4 # load H
+ le?xor r7,r7,r7
+	le?addi	r7,r7,0x8		# need a vperm starting with 08
+ le?lvsr 5,0,r7
+ le?vspltisb 6,0x0f
+	le?vxor	5,5,6			# set a big-endian mask
+ le?vperm $H,$H,$H,5
+
+ vspltisb $xC2,-16 # 0xf0
+ vspltisb $t0,1 # one
+ vaddubm $xC2,$xC2,$xC2 # 0xe0
+ vxor $zero,$zero,$zero
+ vor $xC2,$xC2,$t0 # 0xe1
+ vsldoi $xC2,$xC2,$zero,15 # 0xe1...
+ vsldoi $t1,$zero,$t0,1 # ...1
+ vaddubm $xC2,$xC2,$xC2 # 0xc2...
+ vspltisb $t2,7
+ vor $xC2,$xC2,$t1 # 0xc2....01
+ vspltb $t1,$H,0 # most significant byte
+ vsl $H,$H,$t0 # H<<=1
+ vsrab $t1,$t1,$t2 # broadcast carry bit
+ vand $t1,$t1,$xC2
+ vxor $H,$H,$t1 # twisted H
+
+ vsldoi $H,$H,$H,8 # twist even more ...
+ vsldoi $xC2,$zero,$xC2,8 # 0xc2.0
+ vsldoi $Hl,$zero,$H,8 # ... and split
+ vsldoi $Hh,$H,$zero,8
+
+ stvx_u $xC2,0,r3 # save pre-computed table
+ stvx_u $Hl,r8,r3
+ stvx_u $H, r9,r3
+ stvx_u $Hh,r10,r3
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_init_p8,.-.gcm_init_p8
+
+.globl .gcm_gmult_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $IN,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $zero,$zero,$zero
+
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ vxor $t1,$t1,$Xh
+ vxor $Xl,$Xl,$t1
+
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .gcm_gmult_p8,.-.gcm_gmult_p8
+
+.globl .gcm_ghash_p8
+ lis r0,0xfff8
+ li r8,0x10
+ mfspr $vrsave,256
+ li r9,0x20
+ mtspr 256,r0
+ li r10,0x30
+ lvx_u $Xl,0,$Xip # load Xi
+
+ lvx_u $Hl,r8,$Htbl # load pre-computed table
+ le?lvsl $lemask,r0,r0
+ lvx_u $H, r9,$Htbl
+ le?vspltisb $t0,0x07
+ lvx_u $Hh,r10,$Htbl
+ le?vxor $lemask,$lemask,$t0
+ lvx_u $xC2,0,$Htbl
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ vxor $zero,$zero,$zero
+
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+ subi $len,$len,16
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $IN,$IN,$Xl
+ b Loop
+
+.align 5
+Loop:
+ subic $len,$len,16
+ vpmsumd $Xl,$IN,$Hl # H.lo·Xi.lo
+ subfe. r0,r0,r0 # borrow?-1:0
+ vpmsumd $Xm,$IN,$H # H.hi·Xi.lo+H.lo·Xi.hi
+ and r0,r0,$len
+ vpmsumd $Xh,$IN,$Hh # H.hi·Xi.hi
+ add $inp,$inp,r0
+
+ vpmsumd $t2,$Xl,$xC2 # 1st phase
+
+ vsldoi $t0,$Xm,$zero,8
+ vsldoi $t1,$zero,$Xm,8
+ vxor $Xl,$Xl,$t0
+ vxor $Xh,$Xh,$t1
+
+ vsldoi $Xl,$Xl,$Xl,8
+ vxor $Xl,$Xl,$t2
+ lvx_u $IN,0,$inp
+ addi $inp,$inp,16
+
+ vsldoi $t1,$Xl,$Xl,8 # 2nd phase
+ vpmsumd $Xl,$Xl,$xC2
+ le?vperm $IN,$IN,$IN,$lemask
+ vxor $t1,$t1,$Xh
+ vxor $IN,$IN,$t1
+ vxor $IN,$IN,$Xl
+ beq Loop # did $len-=16 borrow?
+
+ vxor $Xl,$Xl,$t1
+ le?vperm $Xl,$Xl,$Xl,$lemask
+ stvx_u $Xl,0,$Xip # write out Xi
+
+ mtspr 256,$vrsave
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .gcm_ghash_p8,.-.gcm_ghash_p8
+
+.asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+foreach (split("\n",$code)) {
+ if ($flavour =~ /le$/o) { # little-endian
+ s/le\?//o or
+ s/be\?/#be#/o;
+ } else {
+ s/le\?/#le#/o or
+ s/be\?//o;
+ }
+ print $_,"\n";
+}
+
+close STDOUT; # enforce flush
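
For orientation: each iteration of Loop above computes Xi = (Xi xor block) * H in GF(2^128) under GHASH's bit-reflected convention; the three vpmsumd instructions form the 128-bit carry-less product from 64-bit halves, and the two multiplies by $xC2 perform the two-phase reduction. A deliberately slow, hedged C reference of the same multiplication (big-endian byte arrays, shift-and-xor only):

	#include <stdint.h>
	#include <string.h>

	static void ghash_mul(uint8_t x[16], const uint8_t h[16])
	{
		uint8_t z[16] = { 0 }, v[16];

		memcpy(v, h, 16);
		for (int i = 0; i < 128; i++) {
			if (x[i / 8] & (0x80 >> (i % 8)))	/* bits of X, MSB first */
				for (int k = 0; k < 16; k++)
					z[k] ^= v[k];
			int lsb = v[15] & 1;			/* v = v * x ... */
			for (int k = 15; k > 0; k--)
				v[k] = (v[k] >> 1) | (v[k - 1] << 7);
			v[0] >>= 1;
			if (lsb)
				v[0] ^= 0xe1;	/* ... reduced by the reflected polynomial */
		}
		memcpy(x, z, 16);
	}

To mimic gcm_ghash_p8(), xor each 16-byte input block into x and call ghash_mul(x, h) once per block.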
diff --git a/arch/powerpc/crypto/vmx.c b/arch/powerpc/crypto/vmx.c
new file mode 100644
index 0000000000..7eb713cc87
--- /dev/null
+++ b/arch/powerpc/crypto/vmx.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Routines supporting VMX instructions on the Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <asm/cputable.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "aesp8-ppc.h"
+
+static int __init p8_init(void)
+{
+ int ret;
+
+ ret = crypto_register_shash(&p8_ghash_alg);
+ if (ret)
+ goto err;
+
+ ret = crypto_register_alg(&p8_aes_alg);
+ if (ret)
+ goto err_unregister_ghash;
+
+ ret = crypto_register_skcipher(&p8_aes_cbc_alg);
+ if (ret)
+ goto err_unregister_aes;
+
+ ret = crypto_register_skcipher(&p8_aes_ctr_alg);
+ if (ret)
+ goto err_unregister_aes_cbc;
+
+ ret = crypto_register_skcipher(&p8_aes_xts_alg);
+ if (ret)
+ goto err_unregister_aes_ctr;
+
+ return 0;
+
+err_unregister_aes_ctr:
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+err_unregister_aes_cbc:
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+err_unregister_aes:
+ crypto_unregister_alg(&p8_aes_alg);
+err_unregister_ghash:
+ crypto_unregister_shash(&p8_ghash_alg);
+err:
+ return ret;
+}
+
+static void __exit p8_exit(void)
+{
+ crypto_unregister_skcipher(&p8_aes_xts_alg);
+ crypto_unregister_skcipher(&p8_aes_ctr_alg);
+ crypto_unregister_skcipher(&p8_aes_cbc_alg);
+ crypto_unregister_alg(&p8_aes_alg);
+ crypto_unregister_shash(&p8_ghash_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
+module_exit(p8_exit);
+
+MODULE_AUTHOR("Marcelo Cerri <mhcerri@br.ibm.com>");
+MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
+ "support on Power 8");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
diff --git a/arch/powerpc/include/asm/backlight.h b/arch/powerpc/include/asm/backlight.h
index 1b5eab62ed..061a910d74 100644
--- a/arch/powerpc/include/asm/backlight.h
+++ b/arch/powerpc/include/asm/backlight.h
@@ -10,15 +10,14 @@
#define __ASM_POWERPC_BACKLIGHT_H
#ifdef __KERNEL__
-#include <linux/fb.h>
#include <linux/mutex.h>
+struct backlight_device;
+
/* For locking instructions, see the implementation file */
extern struct backlight_device *pmac_backlight;
extern struct mutex pmac_backlight_mutex;
-extern int pmac_backlight_curve_lookup(struct fb_info *info, int value);
-
extern int pmac_has_backlight_type(const char *type);
extern void pmac_backlight_key(int direction);
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 6e70ae5116..faf3e3b4e4 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -269,8 +269,6 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
-void hash__kernel_map_pages(struct page *page, int numpages, int enable);
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index 2fce3498b0..ced7ee8b42 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -45,9 +45,9 @@ static inline int hugepd_ok(hugepd_t hpd)
/*
* This should never get called
*/
-static inline int get_hugepd_cache_index(int index)
+static __always_inline int get_hugepd_cache_index(int index)
{
- BUG();
+ BUILD_BUG();
}
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 927d585652..fac5615e6b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1027,16 +1027,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
- if (radix_enabled())
- radix__kernel_map_pages(page, numpages, enable);
- else
- hash__kernel_map_pages(page, numpages, enable);
-}
-#endif
-
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte_raw(pmd_raw(pmd));
@@ -1158,20 +1148,6 @@ pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp,
}
/*
- * returns true for pmd migration entries, THP, devmap, hugetlb
- * But compile time dependent on THP config
- */
-static inline int pmd_large(pmd_t pmd)
-{
- return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
-}
-
-static inline int pud_large(pud_t pud)
-{
- return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
-}
-
-/*
* For radix we should always find H_PAGE_HASHPTE zero. Hence
* the below will work for radix too
*/
@@ -1451,18 +1427,16 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
}
/*
- * Like pmd_huge() and pmd_large(), but works regardless of config options
+ * Like pmd_huge(), but works regardless of config options
*/
-#define pmd_is_leaf pmd_is_leaf
-#define pmd_leaf pmd_is_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
-#define pud_is_leaf pud_is_leaf
-#define pud_leaf pud_is_leaf
-static inline bool pud_is_leaf(pud_t pud)
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 357e23a403..8f55ff74bb 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -362,8 +362,6 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
-void radix__kernel_map_pages(struct page *page, int numpages, int enable);
-
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
#define vmemmap_can_optimize vmemmap_can_optimize
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 8765d51583..07a204d210 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -454,6 +454,9 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
CPU_FTR_DAWR | CPU_FTR_DAWR1 | \
CPU_FTR_DEXCR_NPHIE)
+
+#define CPU_FTRS_POWER11 CPU_FTRS_POWER10
+
#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -542,19 +545,20 @@ enum {
#define CPU_FTRS_DT_CPU_BASE (~0ul)
#endif
+/* pseries may disable DBELL with ibm,pi-features */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_ALWAYS \
- (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
- CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & ~CPU_FTR_DBELL & \
+ CPU_FTRS_POWER7 & CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & \
+ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
- ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ ~CPU_FTR_HVMODE & ~CPU_FTR_DBELL & CPU_FTRS_POSSIBLE & \
+ CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a41e542ba9..7a8495660c 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -524,7 +524,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
* Used for all but the craziest of phyp interfaces (see plpar_hcall9)
*/
#define PLPAR_HCALL_BUFSIZE 4
-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
@@ -538,7 +538,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
* plpar_hcall, but plpar_hcall_raw works in real mode and does not
* calculate hypervisor call statistics.
*/
-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
@@ -549,8 +549,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
* PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
*/
#define PLPAR_HCALL9_BUFSIZE 9
-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
@@ -570,7 +570,7 @@ struct hvcall_mpp_data {
unsigned long backing_mem;
};
-int h_get_mpp(struct hvcall_mpp_data *);
+long h_get_mpp(struct hvcall_mpp_data *mpp_data);
struct hvcall_mpp_x_data {
unsigned long coalesced_bytes;
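
The retbuf changes above use the C99 "static" array-parameter qualifier: each prototype now asserts that callers pass at least PLPAR_HCALL_BUFSIZE (or PLPAR_HCALL9_BUFSIZE) elements, which modern compilers can check at the call site. A hedged illustration with invented names:

	void takes4(unsigned long retbuf[static 4]);

	void caller(void)
	{
		unsigned long ok[4];
		unsigned long bad[2];

		takes4(ok);	/* fine */
		takes4(bad);	/* gcc/clang warn: array argument is too small */
	}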
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
index 6f33253a36..46fe406f46 100644
--- a/arch/powerpc/include/asm/ibmebus.h
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -48,7 +48,7 @@
struct platform_driver;
-extern struct bus_type ibmebus_bus_type;
+extern const struct bus_type ibmebus_bus_type;
int ibmebus_register_driver(struct platform_driver *drv);
void ibmebus_unregister_driver(struct platform_driver *drv);
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index a4196ab1d0..7b610864b3 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -97,7 +97,7 @@ DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
return false;
if (regs->nip >= (unsigned long)__end_soft_masked)
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08c550ed49..ba2e13bb87 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -585,12 +585,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_inw(port) _rec_inw(port)
#define __do_inl(port) _rec_inl(port)
#else /* CONFIG_PPC32 */
-#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_inb(port) readb((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inw(port) readw((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inl(port) readl((PCI_IO_ADDR)(_IO_BASE + port));
#endif /* !CONFIG_PPC32 */
#ifdef CONFIG_EEH
@@ -606,12 +606,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
-#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
#define __do_memset_io(addr, c, n) \
_memset_io(PCI_FIX_ADDR(addr), c, n)
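
The io.h hunks above move the port arithmetic inside the cast. The plausible motivations, sketched with invented names: with _IO_BASE equal to 0, the old form performed arithmetic on a null pointer (which clang flags under -Wnull-pointer-arithmetic), and an unparenthesized port expression could also bind to the wrong operator:

	unsigned char *old_style(unsigned long port)
	{
		return (unsigned char *)0 + port;	/* null-pointer arithmetic */
	}

	unsigned char *new_style(unsigned long port)
	{
		return (unsigned char *)(0 + port);	/* integer add first, then cast */
	}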
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index e1b43aa121..fdb90e24dc 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -55,59 +55,18 @@
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC_CORE
-
-/*
- * This function is responsible for capturing register states if coming
- * via panic or invoking dump using sysrq-trigger.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs)
-{
- if (oldregs)
- memcpy(newregs, oldregs, sizeof(*newregs));
- else
- ppc_save_regs(newregs);
-}
+struct kimage;
+struct pt_regs;
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
-extern int crashing_cpu;
-extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
-extern void crash_ipi_callback(struct pt_regs *);
-extern int crash_wake_offline;
-
-struct kimage;
-struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
-extern void default_machine_crash_shutdown(struct pt_regs *regs);
-extern int crash_shutdown_register(crash_shutdown_t handler);
-extern int crash_shutdown_unregister(crash_shutdown_t handler);
-
-extern void crash_kexec_prepare(void);
-extern void crash_kexec_secondary(struct pt_regs *regs);
-int __init overlaps_crashkernel(unsigned long start, unsigned long size);
-extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
-static inline bool kdump_in_progress(void)
-{
- return crashing_cpu >= 0;
-}
-
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
-
void kexec_copy_flush(struct kimage *image);
-#if defined(CONFIG_CRASH_DUMP)
-bool is_kdump_kernel(void);
-#define is_kdump_kernel is_kdump_kernel
-#if defined(CONFIG_PPC_RTAS)
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-#define crash_free_reserved_phys_range crash_free_reserved_phys_range
-#endif /* CONFIG_PPC_RTAS */
-#endif /* CONFIG_CRASH_DUMP */
-
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
@@ -152,15 +111,56 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
#endif /* CONFIG_KEXEC_FILE */
-#else /* !CONFIG_KEXEC_CORE */
-static inline void crash_kexec_secondary(struct pt_regs *regs) { }
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_CRASH_RESERVE
+int __init overlaps_crashkernel(unsigned long start, unsigned long size);
+extern void reserve_crashkernel(void);
+#else
+static inline void reserve_crashkernel(void) {}
+static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
+#endif
-static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
+#if defined(CONFIG_CRASH_DUMP)
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic or invoking dump using sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
{
- return 0;
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else
+ ppc_save_regs(newregs);
+}
+
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+extern void crash_ipi_callback(struct pt_regs *regs);
+extern int crash_wake_offline;
+
+extern int crash_shutdown_register(crash_shutdown_t handler);
+extern int crash_shutdown_unregister(crash_shutdown_t handler);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+
+extern void crash_kexec_prepare(void);
+extern void crash_kexec_secondary(struct pt_regs *regs);
+
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
}
-static inline void reserve_crashkernel(void) { ; }
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
+#if defined(CONFIG_PPC_RTAS)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+#endif /* CONFIG_PPC_RTAS */
+
+#else /* !CONFIG_CRASH_DUMP */
+static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int crash_shutdown_register(crash_shutdown_t handler)
{
@@ -183,7 +183,7 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
}
-#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kexec.h>
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d31a5ec155..1862f94335 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -22,6 +22,7 @@ struct pci_host_bridge;
struct machdep_calls {
const char *name;
const char *compatible;
+ const char * const *compatibles;
#ifdef CONFIG_PPC64
#ifdef CONFIG_PM
void (*iommu_restore)(void);
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 3a07c62973..9203ff6acb 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -6,7 +6,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-extern struct bus_type macio_bus_type;
+extern const struct bus_type macio_bus_type;
/* MacIO device driver is defined later */
struct macio_driver;
@@ -126,7 +126,7 @@ static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
struct macio_driver
{
int (*probe)(struct macio_dev* dev, const struct of_device_id *match);
- int (*remove)(struct macio_dev* dev);
+ void (*remove)(struct macio_dev *dev);
int (*suspend)(struct macio_dev* dev, pm_message_t state);
int (*resume)(struct macio_dev* dev);
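
The remove callback dropping its return value follows the tree-wide bus-callback cleanup: the core ignored removal errors anyway, so returning one only invited bugs. A hedged driver-side sketch of the new signature (hypothetical driver):

	static void example_remove(struct macio_dev *mdev)
	{
		/* quiesce the device and free resources; nothing to return */
	}

	static struct macio_driver example_driver = {
		.remove	= example_remove,
		/* .probe, .driver, ... */
	};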
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index d8b7e246a3..3b72c7ed24 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -133,6 +133,7 @@
#define MMU_FTRS_POWER8 MMU_FTRS_POWER6
#define MMU_FTRS_POWER9 MMU_FTRS_POWER6
#define MMU_FTRS_POWER10 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER11 MMU_FTRS_POWER6
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -330,17 +331,10 @@ static __always_inline bool early_radix_enabled(void)
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
{
- return rodata_enabled;
+ return IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && rodata_enabled;
}
-#else
-static inline bool strict_kernel_rwx_enabled(void)
-{
- return false;
-}
-#endif
static inline bool strict_module_rwx_enabled(void)
{
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 58353c5bd3..0c03a98986 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -336,7 +336,7 @@ struct mpic
#endif
};
-extern struct bus_type mpic_subsys;
+extern const struct bus_type mpic_subsys;
/*
* MPIC flags (passed to mpic_alloc)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e667d455ec..1d58da9467 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -163,9 +163,7 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
-#ifdef CONFIG_PPC64
u64 exit_save_r1; /* Syscall/interrupt R1 save */
-#endif
#ifdef CONFIG_PPC_BOOK3E_64
u16 trap_save; /* Used when bad stack is encountered */
#endif
@@ -214,8 +212,6 @@ struct paca_struct {
/* Non-maskable exceptions that are not performance critical */
u64 exnmi[EX_SIZE]; /* used for system reset (nmi) */
u64 exmc[EX_SIZE]; /* used for machine checks */
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
/* Exclusive stacks for system reset and machine check exception. */
void *nmi_emergency_sp;
void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e5fcc79b5b..e411e5a70e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -21,7 +21,7 @@
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
-#define PAGE_SHIFT CONFIG_PPC_PAGE_SHIFT
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9224f23065..239709a2f6 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,6 +41,8 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+#define PFN_PTE_SHIFT PTE_RPN_SHIFT
+
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr);
#define set_ptes set_ptes
@@ -99,10 +101,6 @@ void poking_init(void);
extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_large(pmd) 0
-#endif
-
/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
@@ -180,30 +178,6 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
}
#endif
-#ifndef pmd_is_leaf
-#define pmd_is_leaf pmd_is_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
-{
- return false;
-}
-#endif
-
-#ifndef pud_is_leaf
-#define pud_is_leaf pud_is_leaf
-static inline bool pud_is_leaf(pud_t pud)
-{
- return false;
-}
-#endif
-
-#ifndef p4d_is_leaf
-#define p4d_is_leaf p4d_is_leaf
-static inline bool p4d_is_leaf(p4d_t p4d)
-{
- return false;
-}
-#endif
-
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 005601243d..076ae60b4a 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -510,6 +510,7 @@
#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LHA(r, base, i) (0xa8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
@@ -532,6 +533,7 @@
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_DIVW(d, a, b) (0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -550,6 +552,8 @@
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_EXTSB(d, a) (0x7c000774 | ___PPC_RA(d) | ___PPC_RS(a))
+#define PPC_RAW_EXTSH(d, a) (0x7c000734 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
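
The new PPC_RAW_LHA/PPC_RAW_DIVW/PPC_RAW_EXTSB/PPC_RAW_EXTSH entries follow the file's pattern of assembling a 32-bit instruction word from opcode and field macros. A hedged sketch of how an instruction emitter consumes them (the buffer-and-index convention is illustrative, not any particular JIT's API):

	static void emit_example(u32 *image, int *idx, int dst, int src, int divisor)
	{
		/* lha dst,0(src): sign-extending halfword load */
		image[(*idx)++] = PPC_RAW_LHA(dst, src, 0);
		/* divw dst,dst,divisor: signed 32-bit divide */
		image[(*idx)++] = PPC_RAW_DIVW(dst, dst, divisor);
	}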
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index e7792aa135..1d1018c1e4 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -201,11 +201,13 @@
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT 24
-#define __STK_PARAM(i) (32 + ((i)-3)*8)
+#define STK_PARAM_AREA 32
#else
#define STK_GOT 40
-#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#define STK_PARAM_AREA 48
#endif
+
+#define __STK_PARAM(i) (STK_PARAM_AREA + ((i)-3)*8)
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
#ifdef CONFIG_PPC64_ELF_ABI_V2
@@ -506,7 +508,25 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
*/
#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch) \
lis scratch,0x60000000@h; \
- dcbt 0,scratch,0b01010
+ .machine push; \
+ .machine power4; \
+ dcbt 0,scratch,0b01010; \
+ .machine pop;
+
+#define DCBT_SETUP_STREAMS(from, from_parms, to, to_parms, scratch) \
+ lis scratch,0x8000; /* GO=1 */ \
+ clrldi scratch,scratch,32; \
+ .machine push; \
+ .machine power4; \
+ /* setup read stream 0 */ \
+ dcbt 0,from,0b01000; /* addr from */ \
+ dcbt 0,from_parms,0b01010; /* length and depth from */ \
+ /* setup write stream 1 */ \
+ dcbtst 0,to,0b01000; /* addr to */ \
+ dcbtst 0,to_parms,0b01010; /* length and depth to */ \
+ eieio; \
+ dcbt 0,scratch,0b01010; /* all streams GO */ \
+ .machine pop;
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bb47af9054..d3d1aea009 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1366,6 +1366,7 @@
#define PVR_HX_C2000 0x0066
#define PVR_POWER9 0x004E
#define PVR_POWER10 0x0080
+#define PVR_POWER11 0x0082
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
@@ -1377,6 +1378,7 @@
#define PVR_ARCH_207 0x0f000004
#define PVR_ARCH_300 0x0f000005
#define PVR_ARCH_31 0x0f000006
+#define PVR_ARCH_31_P11 0x0f000007
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 8359c06d92..9893d2001b 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -11,17 +11,27 @@
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
-#define mfpmr(rn) ({unsigned int rval; \
- asm volatile(".machine push; " \
- ".machine e300; " \
- "mfpmr %0," __stringify(rn) ";" \
- ".machine pop; " \
- : "=r" (rval)); rval;})
-#define mtpmr(rn, v) asm volatile(".machine push; " \
- ".machine e300; " \
- "mtpmr " __stringify(rn) ",%0; " \
- ".machine pop; " \
- : : "r" (v))
+static __always_inline unsigned int mfpmr(unsigned int rn)
+{
+ unsigned int rval;
+
+	asm volatile(".machine push; "
+ ".machine e300; "
+ "mfpmr %[rval], %[rn];"
+ ".machine pop;"
+ : [rval] "=r" (rval) : [rn] "i" (rn));
+
+ return rval;
+}
+
+static __always_inline void mtpmr(unsigned int rn, unsigned int val)
+{
+ asm (".machine push; "
+ ".machine e300; "
+ "mtpmr %[rn], %[val];"
+ ".machine pop;"
+	     : : [rn] "i" (rn), [val] "r" (val));
+}
#endif /* __ASSEMBLY__ */
/* Freescale Book E Performance Monitor APU Registers */
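Converting mfpmr/mtpmr from macros to __always_inline functions gains real
type checking of rn and val while keeping the requirement that the PMR
number be a compile-time constant: it feeds an "i" (immediate) asm
constraint, so the functions must always be inlined, even at -O0. A sketch
of a caller (PMRN_PMC0 is assumed to be one of the PMR numbers defined
elsewhere in this header; the helper itself is hypothetical):

	static inline unsigned int read_pmc0(void)
	{
		/* the argument must remain a constant expression; a
		 * runtime-variable PMR number cannot satisfy "i" */
		return mfpmr(PMRN_PMC0);
	}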
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
index 7ebc807aa8..9a025b776a 100644
--- a/arch/powerpc/include/asm/set_memory.h
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -8,6 +8,7 @@
#define SET_MEMORY_X 3
#define SET_MEMORY_NP 4 /* Set memory non present */
#define SET_MEMORY_P 5 /* Set memory present */
+#define SET_MEMORY_ROX 6
int change_memory_attr(unsigned long addr, int numpages, long action);
@@ -41,4 +42,10 @@ static inline int set_memory_p(unsigned long addr, int numpages)
return change_memory_attr(addr, numpages, SET_MEMORY_P);
}
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_ROX);
+}
+#define set_memory_rox set_memory_rox
+
#endif
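SET_MEMORY_ROX lets a caller switch a range to read-only and executable in a
single change_memory_attr() pass rather than separate set_memory_ro() and
set_memory_x() calls. The kprobes hunk later in this diff is the intended
consumer; a condensed sketch of that pattern (assuming module_alloc() /
module_memfree() as the executable-memory allocator, with insns/len standing
for the code being installed):

	void *page = module_alloc(PAGE_SIZE);

	if (page) {
		memcpy(page, insns, len);	/* write while still writable */
		if (set_memory_rox((unsigned long)page, 1)) {
			module_memfree(page);	/* sealing failed: give it back */
			page = NULL;
		}
	}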
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index aaaa576d0e..b77927ccb0 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -27,6 +27,7 @@
extern int boot_cpuid;
extern int boot_cpu_hwid; /* PPC64 only */
+extern int boot_core_hwid;
extern int spinning_secondaries;
extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index aee25e3ebf..fc933807dd 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -48,6 +48,10 @@ static inline void disable_kernel_fp(void)
#else
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
+static inline void enable_kernel_fp(void)
+{
+ BUILD_BUG();
+}
#endif
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index b3de6102a9..1ca7d4c4b9 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -19,6 +19,8 @@
#include <linux/pagemap.h>
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address);
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
#define tlb_flush tlb_flush
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 82cc2c6704..d9ac3a4f46 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -267,6 +267,7 @@ TRACE_EVENT_FN(opal_exit,
);
#endif
+#ifdef CONFIG_PPC_64S_HASH_MMU
TRACE_EVENT(hash_fault,
TP_PROTO(unsigned long addr, unsigned long access, unsigned long trap),
@@ -286,7 +287,7 @@ TRACE_EVENT(hash_fault,
TP_printk("hash fault with addr 0x%lx and access = 0x%lx trap = 0x%lx",
__entry->addr, __entry->access, __entry->trap)
);
-
+#endif
TRACE_EVENT(tlbie,
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index de10437fd2..4cba724c88 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label) \
+ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
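The "std" instruction is DS-form: its displacement field is 14 bits wide and
implicitly scaled by 4, so a constant offset must be a multiple of 4. The
constraint strings request a memory operand the instruction can actually
encode - "Y" (DS-form displacement, GCC-only, hence the clang variant), "Z"
(indexed/indirect), "<>" (pre-modify allowed) - and the %U1/%X1 output
modifiers append the matching "u"/"x" mnemonic suffix for whichever operand
form the compiler picked. A hypothetical sibling for the load direction,
assuming a toolchain with asm-goto-with-outputs support (sketch only, not
part of the patch):

	#define __get_user_asm2_goto(x, addr, label)			\
		asm_goto_output("1: ld%U1%X1 %0,%1	# get_user\n"	\
			EX_TABLE(1b, %l2)				\
			: "=r" (x)					\
			: DS_FORM_CONSTRAINT (*addr)			\
			: : label)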
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index f0a4cf01e8..78302f6c25 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -4,7 +4,6 @@
#ifndef __ASSEMBLY__
-#include <asm/page.h>
#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
@@ -95,7 +94,7 @@ const struct vdso_data *__arch_get_vdso_data(void);
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
- return (void *)vd + PAGE_SIZE;
+ return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
}
#endif
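The only layout fact this helper relies on is that the time-namespace copy of
the vdso data sits exactly one page after the regular one; spelling the
offset as (1U << CONFIG_PAGE_SHIFT) - the Kconfig value that PAGE_SHIFT
itself is derived from - keeps <asm/page.h> out of the vdso build. Roughly:

	/* layout assumed by __arch_get_timens_vdso_data():
	 *
	 *	vd			vd + one page
	 *	+------------------+	+-------------------+
	 *	| vdso_data (host) |	| vdso_data (timens) |
	 *	+------------------+	+-------------------+
	 */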
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index cc9b787627..6faf2a9317 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -39,7 +39,7 @@
*/
#define VIO_CMO_MIN_ENT 1562624
-extern struct bus_type vio_bus_type;
+extern const struct bus_type vio_bus_type;
struct iommu_table;
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 30a12d2086..54653a8634 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -4,8 +4,8 @@
/*
* Word-at-a-time interfaces for PowerPC.
*/
-
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/asm-compat.h>
#include <asm/extable.h>
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 9f18fa090f..1691297a76 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -28,7 +28,6 @@
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
-#define __KVM_HAVE_GUEST_DEBUG
/* Not always available, but if it is, this is the correct offset. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -733,4 +732,48 @@ struct kvm_ppc_xive_eq {
#define KVM_XIVE_TIMA_PAGE_OFFSET 0
#define KVM_XIVE_ESB_PAGE_OFFSET 4
+/* for KVM_PPC_GET_PVINFO */
+
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
+
+struct kvm_ppc_pvinfo {
+ /* out */
+ __u32 flags;
+ __u32 hcall[4];
+ __u8 pad[108];
+};
+
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+
+struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
+
+struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u16 data_keys; /* # storage keys supported for data */
+ __u16 instr_keys; /* # storage keys supported for instructions */
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
+struct kvm_ppc_resize_hpt {
+ __u64 flags;
+ __u32 shift;
+ __u32 pad;
+};
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2919433be3..d3282fbea4 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -55,7 +55,6 @@ CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
KCSAN_SANITIZE_early_32.o := n
-KCSAN_SANITIZE_early_64.o := n
KCSAN_SANITIZE_cputable.o := n
KCSAN_SANITIZE_btext.o := n
KCSAN_SANITIZE_paca.o := n
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9f14d95b8b..f029755f9e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -246,9 +246,7 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
-#ifdef CONFIG_PPC64
OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
-#endif
#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
diff --git a/arch/powerpc/kernel/cpu_specs_book3s_64.h b/arch/powerpc/kernel/cpu_specs_book3s_64.h
index 3ff9757df4..98d4274a1b 100644
--- a/arch/powerpc/kernel/cpu_specs_book3s_64.h
+++ b/arch/powerpc/kernel/cpu_specs_book3s_64.h
@@ -60,6 +60,9 @@
PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
PPC_FEATURE2_VEC_CRYPTO)
+#define COMMON_USER_POWER11 COMMON_USER_POWER10
+#define COMMON_USER2_POWER11 COMMON_USER2_POWER10
+
static struct cpu_spec cpu_specs[] __initdata = {
{ /* PPC970 */
.pvr_mask = 0xffff0000,
@@ -281,6 +284,20 @@ static struct cpu_spec cpu_specs[] __initdata = {
.cpu_restore = __restore_cpu_power10,
.platform = "power10",
},
+ { /* 3.1-compliant processor, i.e. Power11 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000007,
+ .cpu_name = "Power11 (architected)",
+ .cpu_features = CPU_FTRS_POWER11,
+ .cpu_user_features = COMMON_USER_POWER11,
+ .cpu_user_features2 = COMMON_USER2_POWER11,
+ .mmu_features = MMU_FTRS_POWER11,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .platform = "power11",
+ },
{ /* Power7 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003f0000,
@@ -451,6 +468,23 @@ static struct cpu_spec cpu_specs[] __initdata = {
.machine_check_early = __machine_check_early_realmode_p10,
.platform = "power10",
},
+ { /* Power11 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00820000,
+ .cpu_name = "Power11 (raw)",
+ .cpu_features = CPU_FTRS_POWER11,
+ .cpu_user_features = COMMON_USER_POWER11,
+ .cpu_user_features2 = COMMON_USER2_POWER11,
+ .mmu_features = MMU_FTRS_POWER11,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .machine_check_early = __machine_check_early_realmode_p10,
+ .platform = "power11",
+ },
{ /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index c3fb9fdf5b..af4263594e 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -458,6 +458,14 @@ static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
return 1;
}
+static int __init feat_enable_mce_power11(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power11";
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;
+
+ return 1;
+}
+
static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -648,8 +656,10 @@ static struct dt_cpu_feature_match __initdata
{"pc-relative-addressing", feat_enable, 0},
{"machine-check-power9", feat_enable_mce_power9, 0},
{"machine-check-power10", feat_enable_mce_power10, 0},
+ {"machine-check-power11", feat_enable_mce_power11, 0},
{"performance-monitor-power9", feat_enable_pmu_power9, 0},
{"performance-monitor-power10", feat_enable_pmu_power10, 0},
+ {"performance-monitor-power11", feat_enable_pmu_power10, 0},
{"event-based-branch-v3", feat_enable, 0},
{"random-number-generator", feat_enable, 0},
{"system-call-vectored", feat_disable, 0},
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1185efebf0..29a8c8e185 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1285,15 +1285,14 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
+ struct iommu_group *grp;
/* At first attach the ownership is already set */
- if (!domain) {
- iommu_group_put(grp);
+ if (!domain)
return 0;
- }
+ grp = iommu_group_get(dev);
table_group = iommu_group_get_iommudata(grp);
/*
* The domain being set to PLATFORM from earlier
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 6f7d4edaa0..7504ceec5c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -284,15 +284,14 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- void *cursp, *irqsp, *sirqsp;
+ void *cursp, *irqsp;
/* Switch to the irq stack to handle this */
cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
irqsp = hardirq_ctx[raw_smp_processor_id()];
- sirqsp = softirq_ctx[raw_smp_processor_id()];
/* Already there ? If not switch stack and call */
- if (unlikely(cursp == irqsp || cursp == sirqsp))
+ if (unlikely(cursp == irqsp))
__do_irq(regs, current_stack_pointer);
else
call_do_irq(regs, irqsp);
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 48e0eaf1ad..5c06448519 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -46,8 +46,8 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
WARN_ON_ONCE(size & ~PAGE_MASK);
if (slab_is_available()) {
- if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
- pgprot_noncached(PAGE_KERNEL)))
+ if (vmap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+ pgprot_noncached(PAGE_KERNEL)))
vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
} else {
early_ioremap_range(ISA_IO_BASE, pa, size,
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 072ebe7f29..f8208c0271 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct pt_regs *regs;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(nip, parent_nip);
if (bit < 0)
return;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b20ee72e87..bbca90a5e2 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -134,10 +134,16 @@ void *alloc_insn_page(void)
if (!page)
return NULL;
- if (strict_module_rwx_enabled())
- set_memory_rox((unsigned long)page, 1);
+ if (strict_module_rwx_enabled()) {
+ int err = set_memory_rox((unsigned long)page, 1);
+ if (err)
+ goto error;
+ }
return page;
+error:
+ module_memfree(page);
+ return NULL;
}
int arch_prepare_kprobe(struct kprobe *p)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 77364729a1..cd8d8883de 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -151,6 +151,9 @@ static void __init move_device_tree(void)
* pa-features property is missing, or a 1/0 to indicate if the feature
* is supported/not supported. Note that the bit numbers are
* big-endian to match the definition in PAPR.
+ * Note: the 'clear' flag clears the feature if the bit is set in the
+ * ibm,pa/pi-features property; it does not set the feature if the
+ * bit is clear.
*/
struct ibm_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
@@ -159,7 +162,7 @@ struct ibm_feature {
unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa/pi-features */
unsigned char pabit; /* bit number (big-endian) */
- unsigned char invert; /* if 1, pa bit set => clear feature */
+ unsigned char clear; /* if 1, pa bit set => clear feature */
};
static struct ibm_feature ibm_pa_features[] __initdata = {
@@ -193,6 +196,7 @@ static struct ibm_feature ibm_pa_features[] __initdata = {
*/
static struct ibm_feature ibm_pi_features[] __initdata = {
{ .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
+ { .pabyte = 0, .pabit = 4, .cpu_features = CPU_FTR_DBELL, .clear = 1 },
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -220,12 +224,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (fp->pabyte >= ftrs[0])
continue;
bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
- if (bit ^ fp->invert) {
+ if (bit && !fp->clear) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
- } else {
+ } else if (bit == fp->clear) {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
@@ -368,8 +372,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (found < 0)
return 0;
- DBG("boot cpu: logical %d physical %d\n", found,
- be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
if (IS_ENABLED(CONFIG_PPC64))
@@ -382,11 +384,19 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
}
if (boot_cpuid >= nr_cpu_ids) {
- set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
- pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
- boot_cpuid, nr_cpu_ids);
+ // Remember boot core for smp_setup_cpu_maps()
+ boot_core_hwid = be32_to_cpu(intserv[0]);
+
+ pr_warn("Boot CPU %d (core hwid %d) >= nr_cpu_ids, adjusted boot CPU to %d\n",
+ boot_cpuid, boot_core_hwid, found_thread);
+
+ // Adjust boot CPU to appear on logical core 0
+ boot_cpuid = found_thread;
}
+ DBG("boot cpu: logical %d physical %d\n", boot_cpuid,
+ be32_to_cpu(intserv[found_thread]));
+
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
@@ -465,7 +475,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
tce_alloc_end = *lprop;
#endif
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_RESERVE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
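The rename from 'invert' to 'clear' carries a real semantic change: the old
test (bit ^ fp->invert) always acted, either setting or clearing the
feature, whereas the new code leaves it untouched when the bit is clear but
the flag is set. The resulting decision table (bit being the value read from
ibm,pa/pi-features):

	/*
	 *	bit	fp->clear	action
	 *	 1	    0		set the feature bits
	 *	 1	    1		clear the feature bits
	 *	 0	    0		clear the feature bits
	 *	 0	    1		do nothing
	 *
	 * i.e. set on (bit && !fp->clear), clear on (bit == fp->clear),
	 * which is what the new CPU_FTR_DBELL entry in ibm_pi_features
	 * relies on.
	 */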
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index e67effdba8..0ef3582853 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -947,7 +947,7 @@ struct option_vector7 {
} __packed;
struct ibm_arch_vec {
- struct { __be32 mask, val; } pvrs[14];
+ struct { __be32 mask, val; } pvrs[16];
u8 num_vectors;
@@ -1008,6 +1008,14 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.val = cpu_to_be32(0x00800000),
},
{
+ .mask = cpu_to_be32(0xffff0000), /* POWER11 */
+ .val = cpu_to_be32(0x00820000),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* P11 compliant */
+ .val = cpu_to_be32(0x0f000007),
+ },
+ {
.mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
.val = cpu_to_be32(0x0f000006),
},
diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c
index f9af305d95..9e0efb657f 100644
--- a/arch/powerpc/kernel/secure_boot.c
+++ b/arch/powerpc/kernel/secure_boot.c
@@ -32,8 +32,10 @@ bool is_ppc_secureboot_enabled(void)
if (enabled)
goto out;
- if (!of_property_read_u32(of_root, "ibm,secure-boot", &secureboot))
+ node = of_find_node_by_path("/");
+ if (!of_property_read_u32(node, "ibm,secure-boot", &secureboot))
enabled = (secureboot > 1);
+ of_node_put(node);
out:
pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled");
@@ -54,8 +56,10 @@ bool is_ppc_trustedboot_enabled(void)
if (enabled)
goto out;
- if (!of_property_read_u32(of_root, "ibm,trusted-boot", &trustedboot))
+ node = of_find_node_by_path("/");
+ if (!of_property_read_u32(node, "ibm,trusted-boot", &trustedboot))
enabled = (trustedboot > 0);
+ of_node_put(node);
out:
pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled");
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9b142b9d51..01ed1263e1 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -85,6 +85,7 @@ EXPORT_SYMBOL(machine_id);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
+int __initdata boot_core_hwid = -1;
#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
@@ -109,7 +110,7 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_CRASH_DUMP
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif
@@ -411,6 +412,25 @@ static void __init cpu_init_thread_core_maps(int tpc)
u32 *cpu_to_phys_id = NULL;
+static int assign_threads(unsigned int cpu, unsigned int nthreads, bool present,
+ const __be32 *hw_ids)
+{
+ for (int i = 0; i < nthreads && cpu < nr_cpu_ids; i++) {
+		u32 hwid;
+
+ hwid = be32_to_cpu(hw_ids[i]);
+
+ DBG(" thread %d -> cpu %d (hard id %d)\n", i, cpu, hwid);
+
+ set_cpu_present(cpu, present);
+ set_cpu_possible(cpu, true);
+ cpu_to_phys_id[cpu] = hwid;
+ cpu++;
+ }
+
+ return cpu;
+}
+
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_mask
@@ -446,7 +466,7 @@ void __init smp_setup_cpu_maps(void)
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
__be32 cpu_be;
- int j, len;
+ int len;
DBG(" * %pOF...\n", dn);
@@ -468,27 +488,31 @@ void __init smp_setup_cpu_maps(void)
nthreads = len / sizeof(int);
- for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
- bool avail;
-
- DBG(" thread %d -> cpu %d (hard id %d)\n",
- j, cpu, be32_to_cpu(intserv[j]));
+ bool avail = of_device_is_available(dn);
+ if (!avail)
+ avail = !of_property_match_string(dn,
+ "enable-method", "spin-table");
- avail = of_device_is_available(dn);
- if (!avail)
- avail = !of_property_match_string(dn,
- "enable-method", "spin-table");
-
- set_cpu_present(cpu, avail);
- set_cpu_possible(cpu, true);
- cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
- cpu++;
- }
+ if (boot_core_hwid >= 0) {
+ if (cpu == 0) {
+ pr_info("Skipping CPU node %pOF to allow for boot core.\n", dn);
+ cpu = nthreads;
+ continue;
+ }
- if (cpu >= nr_cpu_ids) {
+ if (be32_to_cpu(intserv[0]) == boot_core_hwid) {
+ pr_info("Renumbered boot core %pOF to logical 0\n", dn);
+ assign_threads(0, nthreads, avail, intserv);
+ of_node_put(dn);
+ break;
+ }
+ } else if (cpu >= nr_cpu_ids) {
of_node_put(dn);
break;
}
+
+ if (cpu < nr_cpu_ids)
+ cpu = assign_threads(cpu, nthreads, avail, intserv);
}
/* If no SMT supported, nthreads is forced to 1 */
@@ -616,6 +640,8 @@ static __init void probe_machine(void)
DBG(" %s ...\n", machine_id->name);
if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible))
continue;
+ if (machine_id->compatibles && !of_machine_compatible_match(machine_id->compatibles))
+ continue;
memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
if (ppc_md.probe && !ppc_md.probe())
continue;
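Rather than inflating nr_cpu_ids to cover a boot thread with a high hardware
number (the set_nr_cpu_ids() call removed in prom.c above), the boot core is
now renumbered to logical core 0. A worked example under assumed numbers -
an SMT4 system, kdump kernel booted with nr_cpus=4, crash taken on hardware
thread 21 (the core spanning threads 20-23):

	/* early_init_dt_scan_cpus(): boot thread 21 >= nr_cpu_ids (4), so
	 *	boot_core_hwid = 20	(first thread of the boot core)
	 *	boot_cpuid     = 1	(its position within the core)
	 *
	 * smp_setup_cpu_maps(): the first cpu node is skipped so logical
	 * 0-3 stay free; when the node with intserv[0] == 20 is reached,
	 * assign_threads(0, 4, ...) maps hardware threads 20-23 onto
	 * logical CPUs 0-3 and the scan stops, leaving the crashing
	 * thread 21 as logical CPU 1.
	 */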
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 693334c20d..12e53b3d79 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -588,7 +588,7 @@ void smp_send_debugger_break(void)
}
#endif
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
int cpu;
@@ -631,7 +631,7 @@ void crash_smp_send_stop(void)
stopped = true;
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
if (kexec_crash_image) {
crash_kexec_prepare();
return;
@@ -984,7 +984,7 @@ static bool shared_caches __ro_after_init;
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
- int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+ int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
@@ -1010,9 +1010,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
static int powerpc_shared_cache_flags(void)
{
if (static_branch_unlikely(&splpar_asym_pack))
- return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
+ return SD_SHARE_LLC | SD_ASYM_PACKING;
- return SD_SHARE_PKG_RESOURCES;
+ return SD_SHARE_LLC;
}
static int powerpc_shared_proc_flags(void)
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index 77fedb190c..f6f868e817 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -31,7 +31,7 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
user_exit_irqoff();
BUG_ON(regs_is_unrecoverable(regs));
- BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(!user_mode(regs));
BUG_ON(arch_irq_disabled_regs(regs));
#ifdef CONFIG_PPC_PKEY
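Several files in this series (syscall.c here, traps.c and lib/sstep.c below)
replace open-coded MSR_PR tests with user_mode(). On powerpc the two are
equivalent - user_mode() is defined in terms of the MSR problem-state bit -
so this is purely a readability change. Paraphrasing the ptrace.h
definition:

	/* roughly what arch/powerpc/include/asm/ptrace.h provides */
	#define user_mode(regs)		(((regs)->msr & MSR_PR) != 0)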
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 11e062b47d..f23430adb6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -404,7 +404,7 @@ noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
if (!(regs->msr & MSR_HV))
return;
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
return;
/*
@@ -1510,7 +1510,7 @@ static void do_program_check(struct pt_regs *regs)
if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
bugaddr += PAGE_OFFSET;
- if (!(regs->msr & MSR_PR) && /* not user-mode */
+ if (!user_mode(regs) &&
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
regs_add_return_ip(regs, 4);
return;
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 0c2abe7f99..8e469c4da3 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,11 +3,13 @@
# Makefile for the linux kernel.
#
-obj-y += core.o crash.o core_$(BITS).o
+obj-y += core.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
+obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
+obj-$(CONFIG_CRASH_DUMP) += crash.o
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 27fa9098a5..b8333a49ea 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -44,43 +44,17 @@ void machine_kexec_mask_interrupts(void) {
}
}
+#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
default_machine_crash_shutdown(regs);
}
+#endif
void machine_kexec_cleanup(struct kimage *image)
{
}
-void arch_crash_save_vmcoreinfo(void)
-{
-
-#ifdef CONFIG_NUMA
- VMCOREINFO_SYMBOL(node_data);
- VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
-#endif
-#ifndef CONFIG_NUMA
- VMCOREINFO_SYMBOL(contig_page_data);
-#endif
-#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
- VMCOREINFO_SYMBOL(vmemmap_list);
- VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
- VMCOREINFO_SYMBOL(mmu_psize_defs);
- VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
- VMCOREINFO_OFFSET(vmemmap_backing, list);
- VMCOREINFO_OFFSET(vmemmap_backing, phys);
- VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
- VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
- VMCOREINFO_OFFSET(mmu_psize_def, shift);
-#endif
- VMCOREINFO_SYMBOL(cur_cpu_spec);
- VMCOREINFO_OFFSET(cpu_spec, cpu_features);
- VMCOREINFO_OFFSET(cpu_spec, mmu_features);
- vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
- vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
-
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
@@ -105,6 +79,7 @@ void machine_kexec(struct kimage *image)
for(;;);
}
+#ifdef CONFIG_CRASH_RESERVE
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base, total_mem_sz;
@@ -279,3 +254,4 @@ static int __init kexec_setup(void)
return 0;
}
late_initcall(kexec_setup);
+#endif /* CONFIG_CRASH_RESERVE */
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 904016cf89..6d8951e8e9 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -47,7 +47,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
return ERR_PTR(ret);
- if (image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
/* min & max buffer values for kdump case */
kbuf.buf_min = pbuf.buf_min = crashk_res.start;
kbuf.buf_max = pbuf.buf_max =
@@ -70,7 +70,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem);
/* Load additional segments needed for panic kernel */
- if (image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && image->type == KEXEC_TYPE_CRASH) {
ret = load_crashdump_segments_ppc64(image, &kbuf);
if (ret) {
pr_err("Failed to load kdump kernel segments\n");
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 5b4c5cb233..1bc65de617 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -97,119 +97,6 @@ out:
}
/**
- * get_usable_memory_ranges - Get usable memory ranges. This list includes
- *                            regions like crashkernel, opal/rtas & tce-table
- *                            that the kdump kernel could use.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- /*
- * Early boot failure observed on guests when low memory (first memory
- * block?) is not added to usable memory. So, add [0, crashk_res.end]
-	 * instead of [crashk_res.start, crashk_res.end] to work around it.
-	 * Also, the crashed kernel's memory must be added to the reserve
-	 * map to keep the kdump kernel from using it.
- */
- ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup usable memory ranges\n");
- return ret;
-}
-
-/**
- * get_crash_memory_ranges - Get crash memory ranges. This list includes
- * first/crashing kernel's memory regions that
- * would be exported via an elfcore.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
-{
- phys_addr_t base, end;
- struct crash_mem *tmem;
- u64 i;
- int ret;
-
- for_each_mem_range(i, &base, &end) {
- u64 size = end - base;
-
- /* Skip backup memory region, which needs a separate entry */
- if (base == BACKUP_SRC_START) {
- if (size > BACKUP_SRC_SIZE) {
- base = BACKUP_SRC_END + 1;
- size -= BACKUP_SRC_SIZE;
- } else
- continue;
- }
-
- ret = add_mem_range(mem_ranges, base, size);
- if (ret)
- goto out;
-
- /* Try merging adjacent ranges before reallocation attempt */
- if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
- sort_memory_ranges(*mem_ranges, true);
- }
-
- /* Reallocate memory ranges if there is no space to split ranges */
- tmem = *mem_ranges;
- if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
- tmem = realloc_mem_ranges(mem_ranges);
- if (!tmem)
- goto out;
- }
-
- /* Exclude crashkernel region */
- ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
- if (ret)
- goto out;
-
- /*
- * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
- * regions are exported to save their context at the time of
- * crash, they should actually be backed up just like the
- * first 64K bytes of memory.
- */
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- /* create a separate program header for the backup region */
- ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
- if (ret)
- goto out;
-
- sort_memory_ranges(*mem_ranges, false);
-out:
- if (ret)
- pr_err("Failed to setup crash memory ranges\n");
- return ret;
-}
-
-/**
* get_reserved_memory_ranges - Get reserve memory ranges. This list includes
* memory regions that should be added to the
* memory reserve map to ensure the region is
@@ -434,6 +321,120 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
return ret;
}
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_usable_memory_ranges - Get usable memory ranges. This list includes
+ *                            regions like crashkernel, opal/rtas & tce-table
+ *                            that the kdump kernel could use.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ /*
+ * Early boot failure observed on guests when low memory (first memory
+ * block?) is not added to usable memory. So, add [0, crashk_res.end]
+	 * instead of [crashk_res.start, crashk_res.end] to work around it.
+	 * Also, the crashed kernel's memory must be added to the reserve
+	 * map to keep the kdump kernel from using it.
+ */
+ ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup usable memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_crash_memory_ranges - Get crash memory ranges. This list includes
+ * first/crashing kernel's memory regions that
+ * would be exported via an elfcore.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
+{
+ phys_addr_t base, end;
+ struct crash_mem *tmem;
+ u64 i;
+ int ret;
+
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
+
+ /* Skip backup memory region, which needs a separate entry */
+ if (base == BACKUP_SRC_START) {
+ if (size > BACKUP_SRC_SIZE) {
+ base = BACKUP_SRC_END + 1;
+ size -= BACKUP_SRC_SIZE;
+ } else
+ continue;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ goto out;
+
+ /* Try merging adjacent ranges before reallocation attempt */
+ if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
+ sort_memory_ranges(*mem_ranges, true);
+ }
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ goto out;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
+ * regions are exported to save their context at the time of
+ * crash, they should actually be backed up just like the
+ * first 64K bytes of memory.
+ */
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* create a separate program header for the backup region */
+ ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
+ if (ret)
+ goto out;
+
+ sort_memory_ranges(*mem_ranges, false);
+out:
+ if (ret)
+ pr_err("Failed to setup crash memory ranges\n");
+ return ret;
+}
+
/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
@@ -863,6 +864,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
return 0;
}
+#endif
/**
* setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
@@ -972,26 +974,14 @@ static unsigned int cpu_node_size(void)
return size;
}
-/**
- * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
- * setup FDT for kexec/kdump kernel.
- * @image: kexec image being loaded.
- *
- * Returns the estimated extra size needed for kexec/kdump kernel FDT.
- */
-unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
{
unsigned int cpu_nodes, extra_size = 0;
struct device_node *dn;
u64 usm_entries;
- // Budget some space for the password blob. There's already extra space
- // for the key name
- if (plpks_is_available())
- extra_size += (unsigned int)plpks_get_passwordlen();
-
- if (image->type != KEXEC_TYPE_CRASH)
- return extra_size;
+ if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
+ return 0;
/*
* For kdump kernel, account for linux,usable-memory and
@@ -1020,6 +1010,25 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
}
/**
+ * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
+ * setup FDT for kexec/kdump kernel.
+ * @image: kexec image being loaded.
+ *
+ * Returns the estimated extra size needed for kexec/kdump kernel FDT.
+ */
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+{
+ unsigned int extra_size = 0;
+
+ // Budget some space for the password blob. There's already extra space
+ // for the key name
+ if (plpks_is_available())
+ extra_size += (unsigned int)plpks_get_passwordlen();
+
+ return extra_size + kdump_extra_fdt_size_ppc64(image);
+}
+
+/**
* add_node_props - Reads node properties from device node structure and add
* them to fdt.
* @fdt: Flattened device tree of the kernel
@@ -1171,6 +1180,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
struct crash_mem *umem = NULL, *rmem = NULL;
int i, nr_ranges, ret;
+#ifdef CONFIG_CRASH_DUMP
/*
* Restrict memory usage for kdump kernel by setting up
* usable memory ranges and memory reserve map.
@@ -1207,6 +1217,7 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
goto out;
}
}
+#endif
/* Update cpus nodes information to account hotplug CPUs. */
ret = update_cpus_node(fdt);
@@ -1278,7 +1289,7 @@ int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
buf_min = kbuf->buf_min;
buf_max = kbuf->buf_max;
/* Segments for kdump kernel should be within crashkernel region */
- if (kbuf->image->type == KEXEC_TYPE_CRASH) {
+ if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
buf_min = (buf_min < crashk_res.start ?
crashk_res.start : buf_min);
buf_max = (buf_max > crashk_res.end ?
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index fb3e12f152..33b780049a 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -385,14 +385,16 @@ int add_opal_mem_range(struct crash_mem **mem_ranges)
int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
{
int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
+ struct device_node *root = of_find_node_by_path("/");
const __be32 *prop;
- prop = of_get_property(of_root, "reserved-ranges", &len);
+ prop = of_get_property(root, "reserved-ranges", &len);
+ n_mem_addr_cells = of_n_addr_cells(root);
+ n_mem_size_cells = of_n_size_cells(root);
+ of_node_put(root);
if (!prop)
return 0;
- n_mem_addr_cells = of_n_addr_cells(of_root);
- n_mem_size_cells = of_n_size_cells(of_root);
cells = n_mem_addr_cells + n_mem_size_cells;
/* Each reserved range is an (address,size) pair */
diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
index d9f0dd9b34..104c9911f4 100644
--- a/arch/powerpc/kexec/relocate_32.S
+++ b/arch/powerpc/kexec/relocate_32.S
@@ -8,6 +8,7 @@
* Author: Suzuki Poulose <suzuki@in.ibm.com>
*/
+#include <linux/objtool.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -349,6 +350,7 @@ write_utlb:
cmpwi r10, PPC47x_TLB0_4K
bne 0f
li r10, 0x1000 /* r10 = 4k */
+ ANNOTATE_INTRA_FUNCTION_CALL
bl 1f
0:
diff --git a/arch/powerpc/kexec/vmcore_info.c b/arch/powerpc/kexec/vmcore_info.c
new file mode 100644
index 0000000000..2b65d2adca
--- /dev/null
+++ b/arch/powerpc/kexec/vmcore_info.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/vmcore_info.h>
+#include <asm/pgalloc.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+
+#ifdef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(node_data);
+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+#ifndef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+ VMCOREINFO_SYMBOL(vmemmap_list);
+ VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
+ VMCOREINFO_SYMBOL(mmu_psize_defs);
+ VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
+ VMCOREINFO_OFFSET(vmemmap_backing, list);
+ VMCOREINFO_OFFSET(vmemmap_backing, phys);
+ VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
+ VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
+ VMCOREINFO_OFFSET(mmu_psize_def, shift);
+#endif
+ VMCOREINFO_SYMBOL(cur_cpu_spec);
+ VMCOREINFO_OFFSET(cpu_spec, cpu_features);
+ VMCOREINFO_OFFSET(cpu_spec, mmu_features);
+ vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 074263429f..dbfdc126bf 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
select KVM_COMMON
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_VFIO
- select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
config KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 4a1abb9f7c..408d98f8a5 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -503,7 +503,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
if (!pmd_present(*p))
continue;
- if (pmd_is_leaf(*p)) {
+ if (pmd_leaf(*p)) {
if (full) {
pmd_clear(p);
} else {
@@ -532,7 +532,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
if (!pud_present(*p))
continue;
- if (pud_is_leaf(*p)) {
+ if (pud_leaf(*p)) {
pud_clear(p);
} else {
pmd_t *pmd;
@@ -635,12 +635,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = pud_alloc_one(kvm->mm, gpa);
pmd = NULL;
- if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+ if (pud && pud_present(*pud) && !pud_leaf(*pud))
pmd = pmd_offset(pud, gpa);
else if (level <= 1)
new_pmd = kvmppc_pmd_alloc();
- if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+ if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
new_ptep = kvmppc_pte_alloc();
/* Check if we might have been invalidated; let the guest retry if so */
@@ -658,7 +658,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = NULL;
}
pud = pud_offset(p4d, gpa);
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
/* Check if we raced and someone else has set the same thing */
@@ -709,7 +709,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pmd = NULL;
}
pmd = pmd_offset(pud, gpa);
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
unsigned long lgpa = gpa & PMD_MASK;
/* Check if we raced and someone else has set the same thing */
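pmd_is_leaf()/pud_is_leaf() (and p4d_is_leaf()) were powerpc-local spellings
of a predicate the generic MM code now provides as pmd_leaf()/pud_leaf(): the
entry is a huge mapping that terminates the page-table walk at that level.
The conversion is mechanical; the walker shape the predicates support, in
sketch form:

	/*
	 *	if (pud_leaf(*pud))
	 *		-> terminal 1G mapping, no PMD table below;
	 *	else
	 *		-> descend with pmd_offset(pud, addr);
	 */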
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0b921704da..692a7c6f5f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -444,6 +444,7 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
guest_pcr_bit = PCR_ARCH_300;
break;
case PVR_ARCH_31:
+ case PVR_ARCH_31_P11:
guest_pcr_bit = PCR_ARCH_31;
break;
default:
@@ -4856,7 +4857,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* entering a nested guest in which case the decrementer is now owned
* by L2 and the L1 decrementer is provided in hdec_expires
*/
- if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
+ if (kvmppc_core_pending_dec(vcpu) &&
((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 5c375ec1a3..05f5220960 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -55,7 +55,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dawrx1 = vcpu->arch.dawrx1;
}
-/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
+/* Use noinline_for_stack due to https://llvm.org/pr49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index 8e6f5355f0..1091f7a83b 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -71,8 +71,8 @@ gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
}
if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
- kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
- cfg->vcpu_run_output_cfg);
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
if (rc < 0)
return rc;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 23407fbd73..d32abe7fe6 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -2538,9 +2538,8 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_
vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}
-int kvm_arch_create_vm_debugfs(struct kvm *kvm)
+void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
if (kvm->arch.kvm_ops->create_vm_debugfs)
kvm->arch.kvm_ops->create_vm_debugfs(kvm);
- return 0;
}
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a783973f12..07e7cec4d1 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -27,17 +27,7 @@ _GLOBAL(copypage_power7)
#endif
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- /* setup read stream 0 */
- dcbt 0,r4,0b01000 /* addr from */
- dcbt 0,r7,0b01010 /* length and depth from */
- /* setup write stream 1 */
- dcbtst 0,r9,0b01000 /* addr to */
- dcbtst 0,r10,0b01010 /* length and depth to */
- eieio
- dcbt 0,r8,0b01010 /* all streams GO */
+ DCBT_SETUP_STREAMS(r4, r7, r9, r10, r8)
#ifdef CONFIG_ALTIVEC
mflr r0
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index ac41053c3a..8474c682a1 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -298,17 +298,7 @@ err1; stb r0,0(r3)
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- /* setup read stream 0 */
- dcbt 0,r6,0b01000 /* addr from */
- dcbt 0,r7,0b01010 /* length and depth from */
- /* setup write stream 1 */
- dcbtst 0,r9,0b01000 /* addr to */
- dcbtst 0,r10,0b01010 /* length and depth to */
- eieio
- dcbt 0,r8,0b01010 /* all streams GO */
+ DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
beq cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 9398b2b746..b7c5e7fca8 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -244,15 +244,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- dcbt 0,r6,0b01000
- dcbt 0,r7,0b01010
- dcbtst 0,r9,0b01000
- dcbtst 0,r10,0b01010
- eieio
- dcbt 0,r8,0b01010 /* GO */
+ DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
beq cr1,.Lunwind_stack_nonvmx_copy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5766180f53..e65f3fb68d 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1429,7 +1429,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 18: /* rfid, scary */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = RFI;
return 0;
@@ -1742,13 +1742,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
#endif
case 83: /* mfmsr */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MFMSR;
op->reg = rd;
return 0;
case 146: /* mtmsr */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MTMSR;
op->reg = rd;
@@ -1756,7 +1756,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
#ifdef CONFIG_PPC64
case 178: /* mtmsrd */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MTMSR;
op->reg = rd;
@@ -3437,14 +3437,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
* stored in the thread_struct. If the instruction is in
* the kernel, we must not touch the state in the thread_struct.
*/
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ if (!user_mode(regs) && !(regs->msr & MSR_FP))
return 0;
err = do_fp_load(op, ea, regs, cross_endian);
break;
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ if (!user_mode(regs) && !(regs->msr & MSR_VEC))
return 0;
err = do_vec_load(op->reg, ea, size, regs, cross_endian);
break;
@@ -3459,7 +3459,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
*/
if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
- if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ if (!user_mode(regs) && !(regs->msr & msrbit))
return 0;
err = do_vsx_load(op, ea, regs, cross_endian);
break;
@@ -3495,8 +3495,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
}
#endif
if ((op->type & UPDATE) && size == sizeof(long) &&
- op->reg == 1 && op->update_reg == 1 &&
- !(regs->msr & MSR_PR) &&
+ op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
err = handle_stack_update(ea, regs);
break;
@@ -3508,14 +3507,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
#ifdef CONFIG_PPC_FPU
case STORE_FP:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ if (!user_mode(regs) && !(regs->msr & MSR_FP))
return 0;
err = do_fp_store(op, ea, regs, cross_endian);
break;
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ if (!user_mode(regs) && !(regs->msr & MSR_VEC))
return 0;
err = do_vec_store(op->reg, ea, size, regs, cross_endian);
break;
@@ -3530,7 +3529,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
*/
if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
- if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ if (!user_mode(regs) && !(regs->msr & msrbit))
return 0;
err = do_vsx_store(op, ea, regs, cross_endian);
break;
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 5445587bfe..100f999871 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr)
return true;
}
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void)
mtsr(mfsr(i << 28) | 0x10000000, i << 28);
}
+ return 0;
}
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void)
}
update_bats();
+
+ return 0;
}
/*
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index c0fabe6c5a..15d6f3ea71 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -59,16 +59,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
rflags = htab_convert_pte_flags(new_pmd, flags);
-#if 0
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ /*
+ * THPs are only supported on platforms that can do mixed page size
+ * segments (MPSS) and all such platforms have coherent icache. Hence we
+ * don't need to do lazy icache flush (hash_page_do_lazy_icache()) on
+ * noexecute fault.
+ */
- /*
- * No CPU has hugepages but lacks no execute, so we
- * don't need to worry about that case
- */
- rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
- }
-#endif
/*
* Find the slot index details for this ea, using base page size.
*/
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0626a25b0d..01c3b4b652 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2172,7 +2172,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
-void hash__kernel_map_pages(struct page *page, int numpages, int enable)
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
@@ -2189,6 +2189,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
kernel_unmap_linear_page(vaddr, lmi);
}
local_irq_restore(flags);
+ return 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index d975fb5d7c..2975ea0841 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(pmd_lockptr(mm, pmdp));
- WARN_ON(!(pmd_large(pmd)));
+ WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
@@ -170,6 +170,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
unsigned long old_pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return __pmd(old_pmd);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index c6a4ac766b..15e88f1439 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -204,14 +204,14 @@ static void radix__change_memory_range(unsigned long start, unsigned long end,
pudp = pud_alloc(&init_mm, p4dp, idx);
if (!pudp)
continue;
- if (pud_is_leaf(*pudp)) {
+ if (pud_leaf(*pudp)) {
ptep = (pte_t *)pudp;
goto update_the_pte;
}
pmdp = pmd_alloc(&init_mm, pudp, idx);
if (!pmdp)
continue;
- if (pmd_is_leaf(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
ptep = pmdp_ptep(pmdp);
goto update_the_pte;
}
@@ -767,7 +767,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
if (!pmd_present(*pmd))
continue;
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
@@ -807,7 +807,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
if (!pud_present(*pud))
continue;
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
if (!IS_ALIGNED(addr, PUD_SIZE) ||
!IS_ALIGNED(next, PUD_SIZE)) {
WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -845,7 +845,7 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
if (!p4d_present(*p4d))
continue;
- if (p4d_is_leaf(*p4d)) {
+ if (p4d_leaf(*p4d)) {
if (!IS_ALIGNED(addr, P4D_SIZE) ||
!IS_ALIGNED(next, P4D_SIZE)) {
WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next)
{
- int large = pmd_large(*pmdp);
+ int large = pmd_leaf(*pmdp);
if (large)
vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
@@ -1339,20 +1339,6 @@ void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
#endif
#endif
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-void radix__kernel_map_pages(struct page *page, int numpages, int enable)
-{
- unsigned long addr;
-
- addr = (unsigned long)page_address(page);
-
- if (enable)
- set_memory_p(addr, numpages);
- else
- set_memory_np(addr, numpages);
-}
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
@@ -1554,7 +1540,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
pud_clear(pud);
return 1;
}
@@ -1601,7 +1587,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index fde7790277..c110ab8fa8 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -393,17 +393,17 @@ static const __be32 *of_get_usable_memory(struct device_node *dn)
int walk_drmem_lmbs(struct device_node *dn, void *data,
int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
+ struct device_node *root = of_find_node_by_path("/");
const __be32 *prop, *usm;
int ret = -ENODEV;
- if (!of_root)
+ if (!root)
return ret;
/* Get the address & size cells */
- of_node_get(of_root);
- n_root_addr_cells = of_n_addr_cells(of_root);
- n_root_size_cells = of_n_size_cells(of_root);
- of_node_put(of_root);
+ n_root_addr_cells = of_n_addr_cells(root);
+ n_root_size_cells = of_n_size_cells(root);
+ of_node_put(root);
if (init_drmem_lmb_size(dn))
return ret;
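The drmem change above drops direct use of the of_root global in favour of of_find_node_by_path("/"), which returns the root node with its refcount raised. A minimal sketch of that lookup/put pattern, assuming a hypothetical helper name:

	static int read_root_cells(int *addr_cells, int *size_cells)
	{
		struct device_node *root = of_find_node_by_path("/");

		if (!root)
			return -ENODEV;

		*addr_cells = of_n_addr_cells(root);
		*size_cells = of_n_size_cells(root);
		of_node_put(root);	/* drop the reference taken by the lookup */
		return 0;
	}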
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aa..594a4b7b2c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -226,7 +226,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
return 0;
m = phys_to_virt(gpage_freearray[--nr_gpages]);
gpage_freearray[nr_gpages] = 0;
- list_add(&m->list, &huge_boot_pages);
+ list_add(&m->list, &huge_boot_pages[0]);
m->hstate = hstate;
return 1;
}
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
*/
order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
- if (order) {
- VM_WARN_ON(order <= MAX_PAGE_ORDER);
+ if (order)
hugetlb_cma_reserve(order);
- }
}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 72341b9fb5..6949c2c937 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -160,23 +160,17 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
-void mmu_mark_initmem_nx(void);
-void mmu_mark_rodata_ro(void);
+int mmu_mark_initmem_nx(void);
+int mmu_mark_rodata_ro(void);
#else
-static inline void mmu_mark_initmem_nx(void) { }
-static inline void mmu_mark_rodata_ro(void) { }
+static inline int mmu_mark_initmem_nx(void) { return 0; }
+static inline int mmu_mark_rodata_ro(void) { return 0; }
#endif
#ifdef CONFIG_PPC_8xx
void __init mmu_mapin_immr(void);
#endif
-#ifdef CONFIG_DEBUG_WX
-void ptdump_check_wx(void);
-#else
-static inline void ptdump_check_wx(void) { }
-#endif
-
static inline bool debug_pagealloc_enabled_or_kfence(void)
{
return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
@@ -186,3 +180,5 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
int create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
#endif
+
+int hash__kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 6be6421086..43d4842bb1 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void)
PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
- pgprot_t prot, bool new)
+static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
{
unsigned long v = PAGE_OFFSET + offset;
unsigned long p = offset;
+ int err = 0;
WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
- for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
- for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
- for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
- __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
+ err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
if (!new)
flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
+
+ return err;
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return top;
}
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext);
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+ int err = 0;
if (!debug_pagealloc_enabled_or_kfence())
- mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+ err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false);
+
+ return err;
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
+ int err;
- mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+ err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
mmu_pin_tlb(block_mapped_ram, true);
+
+ return err;
}
#endif
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
index 921c3521ec..266fb22131 100644
--- a/arch/powerpc/mm/nohash/e500.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void)
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
{
unsigned long remapped;
remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
- WARN_ON(__max_low_memory != remapped);
+ if (WARN_ON(__max_low_memory != remapped))
+ return -EINVAL;
+
+ return 0;
}
#endif
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
{
/* Everything is done in mmu_mark_rodata_ro() */
+ return 0;
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index b4f2786a7d..cdff129abb 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
-#include <linux/crash_core.h>
+#include <linux/crash_reserve.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/cacheflush.h>
@@ -173,7 +173,7 @@ static __init bool overlaps_region(const void *fdt, u32 start,
static void __init get_crash_kernel(void *fdt, unsigned long size)
{
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_CRASH_RESERVE
unsigned long long crash_size, crash_base;
int ret;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f6c4ace3b2..a490724e84 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1111,7 +1111,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
static void __init find_possible_nodes(void)
{
- struct device_node *rtas;
+ struct device_node *rtas, *root;
const __be32 *domains = NULL;
int prop_length, max_nodes;
u32 i;
@@ -1132,10 +1132,12 @@ static void __init find_possible_nodes(void)
* If the LPAR is migratable, new nodes might be activated after a LPM,
* so we should consider the max number in that case.
*/
- if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
+ root = of_find_node_by_path("/");
+ if (!of_get_property(root, "ibm,migratable-partition", NULL))
domains = of_get_property(rtas,
"ibm,current-associativity-domains",
&prop_length);
+ of_node_put(root);
if (!domains) {
domains = of_get_property(rtas, "ibm,max-associativity-domains",
&prop_length);
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 6163e484bc..ac22bf2808 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -14,6 +14,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <mm/mmu_decl.h>
static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
unsigned long old, unsigned long new)
@@ -38,6 +39,10 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
/* Don't clear DIRTY bit */
pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
break;
+ case SET_MEMORY_ROX:
+ /* Don't clear DIRTY bit */
+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
+ break;
case SET_MEMORY_RW:
pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
break;
@@ -97,3 +102,26 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
return apply_to_existing_page_range(&init_mm, start, size,
change_page_attr, (void *)action);
}
+
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ int err;
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (PageHighMem(page))
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
+ err = hash__kernel_map_pages(page, numpages, enable);
+ else if (enable)
+ err = set_memory_p(addr, numpages);
+ else
+ err = set_memory_np(addr, numpages);
+
+ if (err)
+ panic("%s: changing memory protections failed\n", __func__);
+}
+#endif
+#endif
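For reference, the consolidated __kernel_map_pages() above is driven by the generic page allocator, which unmaps pages from the linear map on free and restores them on allocation so that stale accesses fault immediately. The callers in include/linux/mm.h look roughly like this (simplified sketch):

	static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, 1);	/* map back in */
	}

	static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, 0);	/* mark not-present */
	}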
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a04ae4449a..9e7ba9c385 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -220,10 +220,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
break;
ptep++;
addr += PAGE_SIZE;
- /*
- * increment the pfn.
- */
- pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
+ pte = pte_next_pfn(pte);
}
}
@@ -413,7 +410,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (p4d_none(p4d))
return NULL;
- if (p4d_is_leaf(p4d)) {
+ if (p4d_leaf(p4d)) {
ret_pte = (pte_t *)p4dp;
goto out;
}
@@ -435,7 +432,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pud_none(pud))
return NULL;
- if (pud_is_leaf(pud)) {
+ if (pud_leaf(pud)) {
ret_pte = (pte_t *)pudp;
goto out;
}
@@ -474,7 +471,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
goto out;
}
- if (pmd_is_leaf(pmd)) {
+ if (pmd_leaf(pmd)) {
ret_pte = (pte_t *)pmdp;
goto out;
}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5c02fd08d6..cfd622ebf7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -130,32 +130,41 @@ void __init mapin_ram(void)
}
}
-void mark_initmem_nx(void)
+static int __mark_initmem_nx(void)
{
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
+ int err;
- mmu_mark_initmem_nx();
+ err = mmu_mark_initmem_nx();
if (!v_block_mapped((unsigned long)_sinittext)) {
- set_memory_nx((unsigned long)_sinittext, numpages);
- set_memory_rw((unsigned long)_sinittext, numpages);
+ err = set_memory_nx((unsigned long)_sinittext, numpages);
+ if (err)
+ return err;
+ err = set_memory_rw((unsigned long)_sinittext, numpages);
}
+ return err;
+}
+
+void mark_initmem_nx(void)
+{
+ int err = __mark_initmem_nx();
+
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_rodata_ro(void)
+static int __mark_rodata_ro(void)
{
unsigned long numpages;
if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
- if (v_block_mapped((unsigned long)_stext + 1)) {
- mmu_mark_rodata_ro();
- ptdump_check_wx();
- return;
- }
+ if (v_block_mapped((unsigned long)_stext + 1))
+ return mmu_mark_rodata_ro();
/*
* mark text and rodata as read only. __end_rodata is set by
@@ -165,24 +174,14 @@ void mark_rodata_ro(void)
numpages = PFN_UP((unsigned long)__end_rodata) -
PFN_DOWN((unsigned long)_stext);
- set_memory_ro((unsigned long)_stext, numpages);
-
- // mark_initmem_nx() should have already run by now
- ptdump_check_wx();
+ return set_memory_ro((unsigned long)_stext, numpages);
}
-#endif
-#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void mark_rodata_ro(void)
{
- unsigned long addr = (unsigned long)page_address(page);
+ int err = __mark_rodata_ro();
- if (PageHighMem(page))
- return;
-
- if (enable)
- set_memory_p(addr, numpages);
- else
- set_memory_np(addr, numpages);
+ if (err)
+ panic("%s() failed, err = %d\n", __func__, err);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif
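The pgtable_32.c refactor above is an instance of a pattern repeated through this series: a __-prefixed helper that propagates errors, wrapped by the existing void interface which panics, since a half-applied permission change leaves the kernel in an unsafe state. In miniature, with hypothetical names:

	static int __example_protect(unsigned long addr, int numpages)
	{
		return set_memory_ro(addr, numpages);	/* may fail */
	}

	void example_protect(unsigned long addr, int numpages)
	{
		int err = __example_protect(addr, numpages);

		if (err)
			panic("%s() failed, err = %d\n", __func__, err);
	}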
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5ac1fd3034..9b99113cb5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
/* 4 level page table */
struct page *p4d_page(p4d_t p4d)
{
- if (p4d_is_leaf(p4d)) {
+ if (p4d_leaf(p4d)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
VM_WARN_ON(!p4d_huge(p4d));
return pte_page(p4d_pte(p4d));
@@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
struct page *pud_page(pud_t pud)
{
- if (pud_is_leaf(pud)) {
+ if (pud_leaf(pud)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
@@ -125,14 +125,14 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_is_leaf(pmd)) {
+ if (pmd_leaf(pmd)) {
/*
* vmalloc_to_page may be called on any vmap address (not only
* vmalloc), and it uses pmd_page() etc., when huge vmap is
* enabled so these checks can't be used.
*/
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+ VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
@@ -150,9 +150,6 @@ void mark_rodata_ro(void)
radix__mark_rodata_ro();
else
hash__mark_rodata_ro();
-
- // mark_initmem_nx() should have already run by now
- ptdump_check_wx();
}
void mark_initmem_nx(void)
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 2313053fe6..9dc239967b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -184,13 +184,14 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
pte_t pte = __pte(st->current_flags);
- if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
+ if (!st->check_wx)
return;
if (!pte_write(pte) || !pte_exec(pte))
return;
- WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
+ WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
+ "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
(void *)st->start_address, (void *)st->start_address);
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
@@ -326,8 +327,7 @@ static void __init build_pgtable_complete_mask(void)
pg_level[i].mask |= pg_level[i].flag[j].mask;
}
-#ifdef CONFIG_DEBUG_WX
-void ptdump_check_wx(void)
+bool ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
@@ -343,15 +343,22 @@ void ptdump_check_wx(void)
}
};
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !mmu_has_feature(MMU_FTR_KERNEL_RO))
+ return true;
+
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
- if (st.wx_pages)
+ if (st.wx_pages) {
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
st.wx_pages);
- else
+
+ return false;
+ } else {
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+
+ return true;
+ }
}
-#endif
static int __init ptdump_init(void)
{
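With ptdump_check_wx() now returning bool (true when no W+X pages are found), callers can act on the result instead of relying on the function's own logging. A hypothetical caller might look like:

	static void check_wx_after_init(void)
	{
		if (!ptdump_check_wx())
			pr_warn("W+X mappings present after init\n");
	}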
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 2f39c50ca7..a0c4f1bde8 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -450,10 +450,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
}
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
break;
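The 'off' tests added above implement the signed ALU instructions from the BPF v4 ISA: BPF_DIV/BPF_MOD with offset 1 are the signed variants, so the JIT must emit divw (signed) rather than divwu (unsigned). The intended semantics, expressed as plain C with illustrative helper names, not JIT code:

	static s32 bpf_sdiv32(s32 a, s32 b) { return a / b; }	/* off == 1 -> divw  */
	static u32 bpf_udiv32(u32 a, u32 b) { return a / b; }	/* off == 0 -> divwu */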
@@ -467,10 +473,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (is_power_of_2((u32)imm)) {
- EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
+ if (off)
+ EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
+ else
+ EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
} else {
PPC_LI32(_R0, imm);
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
@@ -480,11 +492,19 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
+ EMIT(PPC_RAW_ADDZE(_R0, _R0));
+ EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
+ EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else {
imm = ilog2((u32)imm);
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
@@ -497,11 +517,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
imm = -imm;
if (!is_power_of_2(imm))
return -EOPNOTSUPP;
- if (imm == 1)
+ if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
- else
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
+ EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
+ } else {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
break;
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (!imm)
@@ -727,15 +757,30 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* MOV
*/
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
- if (dst_reg == src_reg)
- break;
- EMIT(PPC_RAW_MR(dst_reg, src_reg));
- EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ if (off == 8) {
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 16) {
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 32 && dst_reg == src_reg) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (off == 32) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (dst_reg != src_reg) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ }
break;
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
/* special mov32 for zext */
if (imm == 1)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else if (off == 8)
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ else if (off == 16)
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
else if (dst_reg != src_reg)
EMIT(PPC_RAW_MR(dst_reg, src_reg));
break;
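Likewise for the moves: a non-zero 'off' on a BPF_MOV encodes a sign-extending move (BPF_MOVSX) of the low 8, 16 or 32 bits of the source, hence the extsb/extsh/srawi sequences. For off == 8 the behaviour corresponds to this hypothetical helper:

	static s64 bpf_movsx8(u64 src) { return (s8)src; }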
@@ -751,6 +796,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
/* Copy 16 bits to upper part */
@@ -785,6 +831,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
+ if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
@@ -852,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -905,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
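The 'sync' instructions emitted above mirror the barrier placement of the kernel's own atomics: a full barrier before the larx/stcx. loop and, for value-returning operations, another after it. Schematically, following the __cmpxchg_u32() example the comment cites:

	/*
	 *     sync                  full barrier before the atomic op
	 * 1:  lwarx   r0, 0, addr   load-reserve the old value
	 *     ...                   compute the new value
	 *     stwcx.  rN, 0, addr   store-conditional
	 *     bne-    1b            retry if the reservation was lost
	 *     sync                  full barrier after (BPF_FETCH only)
	 */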
@@ -918,11 +978,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/*
@@ -931,7 +997,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* load only if addr is kernel address (see is_kernel_addr()), otherwise
* set dst_reg=0 and move on.
*/
- if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
PPC_LI32(_R0, TASK_SIZE - off);
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
@@ -953,30 +1019,48 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* as there are two load instructions for dst_reg_h & dst_reg
* respectively.
*/
- if (size == BPF_DW)
+ if (size == BPF_DW ||
+ (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
PPC_JMP((ctx->idx + 3) * 4);
else
PPC_JMP((ctx->idx + 2) * 4);
}
- switch (size) {
- case BPF_B:
- EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
- break;
- case BPF_H:
- EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
- break;
- case BPF_W:
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
- break;
- case BPF_DW:
- EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
- break;
- }
+ if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ }
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
- if (size != BPF_DW && !fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+ }
+ if (size != BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
if (BPF_MODE(code) == BPF_PROBE_MEM) {
int insn_idx = ctx->idx - 1;
@@ -1068,6 +1152,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]);
break;
+ case BPF_JMP32 | BPF_JA:
+ PPC_JMP(addrs[i + 1 + imm]);
+ break;
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 79f23974a3..58522de615 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -202,7 +202,8 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
+static int
+bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
long reladdr;
@@ -211,19 +212,20 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
return -EINVAL;
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
- reladdr = func_addr - CTX_NIA(ctx);
+ reladdr = func_addr - local_paca->kernelbase;
if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
- pr_err("eBPF: address of %ps out of range of pcrel address.\n",
- (void *)func);
+ pr_err("eBPF: address of %ps out of range of 34-bit relative address.\n",
+ (void *)func);
return -ERANGE;
}
- /* pla r12,addr */
- EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
- EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTR());
-
+ EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
+ /* Align for subsequent prefix instruction */
+ if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
+ EMIT(PPC_RAW_NOP());
+ /* paddi r12,r12,addr */
+ EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
+ EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
} else {
reladdr = func_addr - kernel_toc_addr();
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
@@ -233,9 +235,9 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTRL());
}
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
return 0;
}
@@ -285,7 +287,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
int b2p_index = bpf_to_ppc(BPF_REG_3);
int bpf_tailcall_prologue_size = 8;
- if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
@@ -803,6 +805,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@@ -865,6 +876,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
@@ -993,7 +1007,7 @@ emit_clear:
return ret;
if (func_addr_fixed)
- ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
else
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
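The comp64 change above stops forming the helper address relative to the JIT code's next-instruction address: JIT'ed programs live in module/vmalloc space, which can sit further from kernel text than paddi's signed 34-bit (roughly +/-8G) reach. Loading kernelbase from the paca and applying a kernelbase-relative offset keeps the displacement small for any kernel-text helper. The emitted sequence, schematically:

	/*
	 *   ld    r12, kernelbase(r13)   r13 holds the paca pointer
	 *   (nop)                        only to 8-byte-align the prefix insn
	 *   paddi r12, r12, reladdr      reladdr = func_addr - kernelbase
	 *   mtctr r12
	 *   bctrl
	 */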
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b7ff680cde..6b5f8a94e7 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -256,7 +256,7 @@ static bool regs_sipr(struct pt_regs *regs)
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
return PERF_RECORD_MISC_USER;
if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
return PERF_RECORD_MISC_HYPERVISOR;
@@ -2593,6 +2593,8 @@ static int __init init_ppc64_pmu(void)
return 0;
else if (!init_power10_pmu())
return 0;
+ else if (!init_power11_pmu())
+ return 0;
else if (!init_ppc970_pmu())
return 0;
else
diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h
index 4c18b55043..a70ac471a5 100644
--- a/arch/powerpc/perf/internal.h
+++ b/arch/powerpc/perf/internal.h
@@ -10,4 +10,5 @@ int __init init_power7_pmu(void);
int __init init_power8_pmu(void);
int __init init_power9_pmu(void);
int __init init_power10_pmu(void);
+int __init init_power11_pmu(void);
int __init init_generic_compat_pmu(void);
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 9b5133e361..62a68b6b2d 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -634,3 +634,30 @@ int __init init_power10_pmu(void)
return 0;
}
+
+static struct power_pmu power11_pmu;
+
+int __init init_power11_pmu(void)
+{
+ unsigned int pvr;
+ int rc;
+
+ pvr = mfspr(SPRN_PVR);
+ if (PVR_VER(pvr) != PVR_POWER11)
+ return -ENODEV;
+
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
+
+ power11_pmu = power10_pmu;
+ power11_pmu.name = "Power11";
+
+ rc = register_power_pmu(&power11_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index e454e9d2ef..294ab27285 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -59,16 +59,13 @@ static const char * const board[] __initconst = {
static int __init ppc40x_probe(void)
{
- if (of_device_compatible_match(of_root, board)) {
- pci_set_flags(PCI_REASSIGN_ALL_RSRC);
- return 1;
- }
-
- return 0;
+ pci_set_flags(PCI_REASSIGN_ALL_RSRC);
+ return 1;
}
define_machine(ppc40x_simple) {
.name = "PowerPC 40x Platform",
+ .compatibles = board,
.probe = ppc40x_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
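This and the following platform conversions rely on the machine description's 'compatibles' field: when it is set, the generic machine probe code matches the root node's compatible strings against the list, making one-line .probe callbacks redundant. The resulting shape, with an illustrative board and compatible string:

	static const char * const board[] __initconst = {
		"vendor,example-board",		/* hypothetical compatible */
		NULL
	};

	define_machine(example_board) {
		.name		= "Example board",
		.compatibles	= board,	/* .probe no longer needed */
	};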
diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index 0d58ab257c..d4fa6c302c 100644
--- a/arch/powerpc/platforms/512x/mpc512x_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -32,9 +32,6 @@ static const char * const board[] __initconst = {
*/
static int __init mpc512x_generic_probe(void)
{
- if (!of_device_compatible_match(of_root, board))
- return 0;
-
mpc512x_init_early();
return 1;
@@ -42,6 +39,7 @@ static int __init mpc512x_generic_probe(void)
define_machine(mpc512x_generic) {
.name = "MPC512x generic",
+ .compatibles = board,
.probe = mpc512x_generic_probe,
.init = mpc512x_init,
.setup_arch = mpc512x_setup_arch,
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index aa82e6b437..37a67120f2 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -195,8 +195,10 @@ static void __init efika_setup_arch(void)
static int __init efika_probe(void)
{
- const char *model = of_get_property(of_root, "model", NULL);
+ struct device_node *root = of_find_node_by_path("/");
+ const char *model = of_get_property(root, "model", NULL);
+ of_node_put(root);
if (model == NULL)
return 0;
if (strcmp(model, "EFIKA5K2"))
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 0fd67b3ffc..0a161d82a3 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -172,17 +172,9 @@ static const char * const board[] __initconst = {
NULL,
};
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init lite5200_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
define_machine(lite5200) {
.name = "lite5200",
- .probe = lite5200_probe,
+ .compatibles = board,
.setup_arch = lite5200_setup_arch,
.discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index f1e85e86f5..7e0e4c34a4 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -59,17 +59,9 @@ static const char *board[] __initdata = {
NULL
};
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init mpc5200_simple_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
define_machine(mpc5200_simple_platform) {
.name = "mpc5200-simple-platform",
- .probe = mpc5200_simple_probe,
+ .compatibles = board,
.setup_arch = mpc5200_simple_setup_arch,
.discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 534bb22748..63b6d21372 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -34,19 +34,11 @@ static const char *board[] __initdata = {
NULL
};
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init mpc830x_rdb_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
define_machine(mpc830x_rdb) {
.name = "MPC830x RDB",
- .probe = mpc830x_rdb_probe,
+ .compatibles = board,
.setup_arch = mpc830x_rdb_setup_arch,
.discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 7b901ab3b8..5c39966762 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -34,19 +34,11 @@ static const char *board[] __initdata = {
NULL
};
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init mpc831x_rdb_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices);
define_machine(mpc831x_rdb) {
.name = "MPC831x RDB",
- .probe = mpc831x_rdb_probe,
+ .compatibles = board,
.setup_arch = mpc831x_rdb_setup_arch,
.discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 39e78018dd..45823e1479 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -61,17 +61,9 @@ static const char * const board[] __initconst = {
NULL
};
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-static int __init mpc837x_rdb_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
define_machine(mpc837x_rdb) {
.name = "MPC837x RDB/WLAN",
- .probe = mpc837x_rdb_probe,
+ .compatibles = board,
.setup_arch = mpc837x_rdb_setup_arch,
.discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index c9664e46b0..99bd4355f2 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -206,7 +206,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state)
out_be32(&pmc_regs->config1,
in_be32(&pmc_regs->config1) | PMCCR1_POWER_OFF);
- enable_kernel_fp();
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ enable_kernel_fp();
mpc83xx_enter_deep_sleep(immrbase);
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index 2eb62bff86..3ad8096fcf 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -19,7 +19,7 @@
#include "mpc85xx.h"
#include "smp.h"
-void __init bsc913x_qds_pic_init(void)
+static void __init bsc913x_qds_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index 161f006cb3..dcd358c282 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -15,7 +15,7 @@
#include "mpc85xx.h"
-void __init bsc913x_rdb_pic_init(void)
+static void __init bsc913x_rdb_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 645fcca77c..c44400e95f 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -149,7 +149,7 @@ static int __init corenet_generic_probe(void)
extern struct smp_ops_t smp_85xx_ops;
#endif
- if (of_device_compatible_match(of_root, boards))
+ if (of_machine_compatible_match(boards))
return 1;
/* Check if we're running under the Freescale hypervisor */
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 9c3b44a195..477852f1a7 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -38,7 +38,7 @@
void __iomem *imp3a_regs;
-void __init ge_imp3a_pic_init(void)
+static void __init ge_imp3a_pic_init(void)
{
struct mpic *mpic;
struct device_node *np;
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 751395cbf0..34ce21f426 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -114,7 +114,7 @@ static int gpio_halt_probe(struct platform_device *pdev)
return ret;
}
-static int gpio_halt_remove(struct platform_device *pdev)
+static void gpio_halt_remove(struct platform_device *pdev)
{
free_irq(halt_irq, pdev);
cancel_work_sync(&gpio_halt_wq);
@@ -124,8 +124,6 @@ static int gpio_halt_remove(struct platform_device *pdev)
gpiod_put(halt_gpio);
halt_gpio = NULL;
-
- return 0;
}
static const struct of_device_id gpio_halt_match[] = {
@@ -145,7 +143,7 @@ static struct platform_driver gpio_halt_driver = {
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
- .remove = gpio_halt_remove,
+ .remove_new = gpio_halt_remove,
};
module_platform_driver(gpio_halt_driver);
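The .remove to .remove_new conversions here and below switch to the void-returning platform driver remove callback: the device is going away regardless, so an error code from remove could only ever be logged and then ignored. The shape of a converted driver, with hypothetical names:

	static void example_remove(struct platform_device *pdev)
	{
		/* tear down in reverse of probe order; nothing to return */
	}

	static struct platform_driver example_driver = {
		.probe		= example_probe,	/* hypothetical probe */
		.remove_new	= example_remove,
		.driver		= {
			.name = "example",
		},
	};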
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 40aa582068..e52b848b64 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
hard_irq_disable();
mpic_teardown_this_cpu(secondary);
+#ifdef CONFIG_CRASH_DUMP
if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
/*
* We enter the crash kernel on whatever cpu crashed,
@@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
*/
disable_threadbit = 1;
disable_cpu = cpu_first_thread_sibling(cpu);
- } else if (sibling != crashing_cpu &&
- cpu_thread_in_core(cpu) == 0 &&
- cpu_thread_in_core(sibling) != 0) {
+ } else if (sibling == crashing_cpu) {
+ return;
+ }
+#endif
+ if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
disable_threadbit = 2;
disable_cpu = sibling;
}
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 6be1b9809d..f74d446c53 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -112,17 +112,9 @@ static const char * const board[] __initconst = {
NULL
};
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init tqm85xx_probe(void)
-{
- return of_device_compatible_match(of_root, board);
-}
-
define_machine(tqm85xx) {
.name = "TQM85xx",
- .probe = tqm85xx_probe,
+ .compatibles = board,
.setup_arch = tqm85xx_setup_arch,
.init_IRQ = tqm85xx_pic_init,
.show_cpuinfo = tqm85xx_show_cpuinfo,
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 6c6e714a75..2c8dc08869 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -25,7 +25,7 @@
extern void __flush_disable_L1(void);
-void amigaone_show_cpuinfo(struct seq_file *m)
+static void amigaone_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: Eyetech Ltd.\n");
}
@@ -65,7 +65,7 @@ static int __init amigaone_add_bridge(struct device_node *dev)
return 0;
}
-void __init amigaone_setup_arch(void)
+static void __init amigaone_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
@@ -83,7 +83,7 @@ static void __init amigaone_discover_phbs(void)
BUG_ON(phb != 0);
}
-void __init amigaone_init_IRQ(void)
+static void __init amigaone_init_IRQ(void)
{
struct device_node *pic, *np = NULL;
const unsigned long *prop = NULL;
@@ -123,7 +123,7 @@ static int __init request_isa_regions(void)
}
machine_device_initcall(amigaone, request_isa_regions);
-void __noreturn amigaone_restart(char *cmd)
+static void __noreturn amigaone_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index fd130fe7a6..4e983af329 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -260,7 +260,7 @@ out:
}
-static int gpio_mdio_remove(struct platform_device *dev)
+static void gpio_mdio_remove(struct platform_device *dev)
{
struct mii_bus *bus = dev_get_drvdata(&dev->dev);
@@ -271,8 +271,6 @@ static int gpio_mdio_remove(struct platform_device *dev)
kfree(bus->priv);
bus->priv = NULL;
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id gpio_mdio_match[] =
@@ -287,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match);
static struct platform_driver gpio_mdio_driver =
{
.probe = gpio_mdio_probe,
- .remove = gpio_mdio_remove,
+ .remove_new = gpio_mdio_remove,
.driver = {
.name = "gpio-mdio-bitbang",
.of_match_table = gpio_mdio_match,
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index f27d314147..60f990a336 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -270,16 +270,18 @@ static int __init pas_add_bridge(struct device_node *dev)
void __init pas_pci_init(void)
{
+ struct device_node *root = of_find_node_by_path("/");
struct device_node *np;
int res;
pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS);
- np = of_find_compatible_node(of_root, NULL, "pasemi,rootbus");
+ np = of_find_compatible_node(root, NULL, "pasemi,rootbus");
if (np) {
res = pas_add_bridge(np);
of_node_put(np);
}
+ of_node_put(root);
}
void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index aeb79a8b3e..12bc01353b 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/adb.h>
#include <linux/pmu.h>
@@ -72,31 +71,6 @@ int pmac_has_backlight_type(const char *type)
return 0;
}
-int pmac_backlight_curve_lookup(struct fb_info *info, int value)
-{
- int level = (FB_BACKLIGHT_LEVELS - 1);
-
- if (info && info->bl_dev) {
- int i, max = 0;
-
- /* Look for biggest value */
- for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
- max = max((int)info->bl_curve[i], max);
-
- /* Look for nearest value */
- for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
- int diff = abs(info->bl_curve[i] - value);
- if (diff < max) {
- max = diff;
- level = i;
- }
- }
-
- }
-
- return level;
-}
-
static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 81c9fbae88..2cc257f75c 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2333,7 +2333,6 @@ static struct pmac_mb_def pmac_mb_defs[] = {
PMAC_TYPE_POWERMAC_G5, g5_features,
0,
},
-#ifdef CONFIG_PPC64
{ "PowerMac7,3", "PowerMac G5",
PMAC_TYPE_POWERMAC_G5, g5_features,
0,
@@ -2359,7 +2358,6 @@ static struct pmac_mb_def pmac_mb_defs[] = {
0,
},
#endif /* CONFIG_PPC64 */
-#endif /* CONFIG_PPC64 */
};
/*
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index bb7657115f..c9a9b759cc 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -16,7 +16,7 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
#include <linux/of.h>
#include <asm/page.h>
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index b66b06efce..24f04f20d3 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -425,12 +425,11 @@ static int opal_prd_probe(struct platform_device *pdev)
return 0;
}
-static int opal_prd_remove(struct platform_device *pdev)
+static void opal_prd_remove(struct platform_device *pdev)
{
misc_deregister(&opal_prd_dev);
opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
- return 0;
}
static const struct of_device_id opal_prd_match[] = {
@@ -444,7 +443,7 @@ static struct platform_driver opal_prd_driver = {
.of_match_table = opal_prd_match,
},
.probe = opal_prd_probe,
- .remove = opal_prd_remove,
+ .remove_new = opal_prd_remove,
};
module_platform_driver(opal_prd_driver);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 9e1a25398f..8f14f0581a 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -434,7 +434,7 @@ void __init pnv_smp_init(void)
smp_ops = &pnv_smp_ops;
#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_DUMP
crash_wake_offline = 1;
#endif
#endif
diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S
index 509e30ad01..e8ab3d6b03 100644
--- a/arch/powerpc/platforms/ps3/hvcall.S
+++ b/arch/powerpc/platforms/ps3/hvcall.S
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
#define lv1call .long 0x44000022; extsw r3, r3
@@ -16,12 +17,14 @@
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
li r11, API_NUMBER; \
lv1call; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE; \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
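The hvcall.S rework above, repeated for every wrapper below, gives each lv1 call a real stack frame. Previously the wrappers parked saved values at negative offsets from r1, below the stack pointer, in a region the ABI treats as volatile and which anything asynchronous (an interrupt, for instance) is entitled to clobber; after the stdu those slots sit inside the frame. The fixed zero-result shape, annotated schematically:

	/*
	 *   mflr  r0
	 *   std   r0, LRSAVE(r1)                 save LR in the caller's frame
	 *   stdu  r1, -STACK_FRAME_MIN_SIZE(r1)  open a minimal frame
	 *   li    r11, API_NUMBER
	 *   lv1call                              hypervisor call
	 *   addi  r1, r1, STACK_FRAME_MIN_SIZE   close the frame
	 *   ld    r0, LRSAVE(r1)
	 *   mtlr  r0
	 *   blr
	 */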
@@ -38,18 +41,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r3, -8(r1); \
+ std r3, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -57,21 +61,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r3, -8(r1); \
- stdu r4, -16(r1); \
+ std r4, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -79,16 +84,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r3, -8(r1); \
std r4, -16(r1); \
- stdu r5, -24(r1); \
+ std r5, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -96,7 +102,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -104,7 +110,7 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r3, -8(r1); \
std r4, -16(r1); \
@@ -112,12 +118,13 @@ _GLOBAL(_##API_NAME) \
std r6, -32(r1); \
std r7, -40(r1); \
std r8, -48(r1); \
- stdu r9, -56(r1); \
+ std r9, -56(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-56(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 56; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+56; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -133,7 +140,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -56(r1); \
std r10, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -141,18 +148,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r4, -8(r1); \
+ std r4, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -160,21 +168,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
- stdu r5, -16(r1); \
+ std r5, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -182,16 +191,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
std r5, -16(r1); \
- stdu r6, -24(r1); \
+ std r6, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -199,7 +209,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -207,17 +217,18 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
std r5, -16(r1); \
std r6, -24(r1); \
- stdu r7, -32(r1); \
+ std r7, -32(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-32(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 32; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+32; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -227,7 +238,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -32(r1); \
std r7, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -235,18 +246,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
std r5, -16(r1); \
std r6, -24(r1); \
std r7, -32(r1); \
- stdu r8, -40(r1); \
+ std r8, -40(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-40(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 40; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+40; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -258,7 +270,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -40(r1); \
std r8, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -266,19 +278,20 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
std r5, -16(r1); \
std r6, -24(r1); \
std r7, -32(r1); \
std r8, -40(r1); \
- stdu r9, -48(r1); \
+ std r9, -48(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-48(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 48; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+48; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -292,7 +305,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -48(r1); \
std r9, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -300,7 +313,7 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r4, -8(r1); \
std r5, -16(r1); \
@@ -308,12 +321,13 @@ _GLOBAL(_##API_NAME) \
std r7, -32(r1); \
std r8, -40(r1); \
std r9, -48(r1); \
- stdu r10, -56(r1); \
+ std r10, -56(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-56(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 56; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+56; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -329,7 +343,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -56(r1); \
std r10, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -337,18 +351,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r5, -8(r1); \
+ std r5, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -356,21 +371,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r5, -8(r1); \
- stdu r6, -16(r1); \
+ std r6, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -378,16 +394,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r5, -8(r1); \
std r6, -16(r1); \
- stdu r7, -24(r1); \
+ std r7, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -395,7 +412,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -403,17 +420,18 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r5, -8(r1); \
std r6, -16(r1); \
std r7, -24(r1); \
- stdu r8, -32(r1); \
+ std r8, -32(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-32(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 32; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+32; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -423,7 +441,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -32(r1); \
std r7, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -431,18 +449,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r5, -8(r1); \
std r6, -16(r1); \
std r7, -24(r1); \
std r8, -32(r1); \
- stdu r9, -40(r1); \
+ std r9, -40(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-40(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 40; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+40; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -454,7 +473,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -40(r1); \
std r8, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -462,18 +481,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r6, -8(r1); \
+ std r6, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -481,21 +501,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r6, -8(r1); \
- stdu r7, -16(r1); \
+ std r7, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -503,16 +524,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r6, -8(r1); \
std r7, -16(r1); \
- stdu r8, -24(r1); \
+ std r8, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -520,7 +542,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -528,18 +550,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r7, -8(r1); \
+ std r7, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -547,21 +570,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r7, -8(r1); \
- stdu r8, -16(r1); \
+ std r8, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -569,16 +593,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r7, -8(r1); \
std r8, -16(r1); \
- stdu r9, -24(r1); \
+ std r9, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -586,7 +611,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -594,18 +619,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r8, -8(r1); \
+ std r8, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -613,21 +639,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r8, -8(r1); \
- stdu r9, -16(r1); \
+ std r9, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -635,16 +662,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r8, -8(r1); \
std r9, -16(r1); \
- stdu r10, -24(r1); \
+ std r10, -24(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-24(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 24; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+24; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
@@ -652,7 +680,7 @@ _GLOBAL(_##API_NAME) \
ld r11, -24(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -660,18 +688,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r9, -8(r1); \
+ std r9, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -679,21 +708,22 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r9, -8(r1); \
- stdu r10, -16(r1); \
+ std r10, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -701,23 +731,24 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
std r9, -8(r1); \
- stdu r10, -16(r1); \
+ std r10, -16(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-16(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 16; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+16; \
ld r11, -8(r1); \
std r4, 0(r11); \
ld r11, -16(r1); \
std r5, 0(r11); \
- ld r11, 48+8*8(r1); \
+ ld r11, STK_PARAM_AREA+8*8(r1); \
std r6, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -725,18 +756,19 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- stdu r10, -8(r1); \
+ std r10, -8(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE-8(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- addi r1, r1, 8; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE+8; \
ld r11, -8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -744,27 +776,29 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
\
- std r10, 48+8*7(r1); \
+ std r10, STK_PARAM_AREA+8*7(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- ld r11, 48+8*7(r1); \
+ addi r1, r1, STACK_FRAME_MIN_SIZE; \
+ ld r11, STK_PARAM_AREA+8*7(r1); \
std r4, 0(r11); \
- ld r11, 48+8*8(r1); \
+ ld r11, STK_PARAM_AREA+8*8(r1); \
std r5, 0(r11); \
- ld r11, 48+8*9(r1); \
+ ld r11, STK_PARAM_AREA+8*9(r1); \
std r6, 0(r11); \
- ld r11, 48+8*10(r1); \
+ ld r11, STK_PARAM_AREA+8*10(r1); \
std r7, 0(r11); \
- ld r11, 48+8*11(r1); \
+ ld r11, STK_PARAM_AREA+8*11(r1); \
std r8, 0(r11); \
- ld r11, 48+8*12(r1); \
+ ld r11, STK_PARAM_AREA+8*12(r1); \
std r9, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
@@ -772,15 +806,17 @@ _GLOBAL(_##API_NAME) \
_GLOBAL(_##API_NAME) \
\
mflr r0; \
- std r0, 16(r1); \
+ std r0, LRSAVE(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
\
li r11, API_NUMBER; \
lv1call; \
\
- ld r11, 48+8*8(r1); \
+ addi r1, r1, STACK_FRAME_MIN_SIZE; \
+ ld r11, STK_PARAM_AREA+8*8(r1); \
std r4, 0(r11); \
\
- ld r0, 16(r1); \
+ ld r0, LRSAVE(r1); \
mtlr r0; \
blr
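Every hunk above follows one pattern: the old code abused stdu to stash a result pointer while moving r1 without writing a back chain; the new code stores the pointers below the stack pointer, establishes a real ABI frame with a single stdu of r1, and saves the link register via the symbolic LRSAVE slot. A minimal sketch of that discipline for one result pointer, assuming ELFv2 values (LRSAVE is 16, STACK_FRAME_MIN_SIZE is 32); example_call is hypothetical:

_GLOBAL(example_call)
	mflr	r0
	std	r0, 16(r1)		/* LR into the caller's LR-save slot */
	std	r5, -8(r1)		/* result pointer below the frame */
	stdu	r1, -(32+8)(r1)		/* allocate frame + write back chain */

	/* ... lv1call or similar, now free to push its own frames ... */

	addi	r1, r1, 32+8		/* tear the frame down */
	ld	r11, -8(r1)
	std	r4, 0(r11)		/* store the returned value */
	ld	r0, 16(r1)
	mtlr	r0
	blr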
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 998e3aff24..b401282727 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -55,7 +55,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
};
-struct bus_type ibmebus_bus_type;
+const struct bus_type ibmebus_bus_type;
/* These devices will automatically be added to the bus during init */
static const struct of_device_id ibmebus_matches[] __initconst = {
@@ -432,7 +432,7 @@ static int ibmebus_bus_modalias(const struct device *dev, struct kobj_uevent_env
return of_device_uevent_modalias(dev, env);
}
-struct bus_type ibmebus_bus_type = {
+const struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
.uevent = ibmebus_bus_modalias,
.bus_groups = ibmbus_bus_groups,
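In recent kernels the driver core takes const struct bus_type * throughout (bus_register() included), so bus definitions like ibmebus_bus_type can live in rodata. A minimal sketch with a hypothetical example bus:

#include <linux/device.h>

/* Constified bus definition: the object is read-only after build. */
const struct bus_type example_bus_type = {
	.name = "example",
};

static int __init example_bus_init(void)
{
	return bus_register(&example_bus_type);	/* accepts a const pointer */
}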
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4e9916bb03..c1d8bee8f7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1886,10 +1886,10 @@ out:
* h_get_mpp
* H_GET_MPP hcall returns info in 7 parms
*/
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+long h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
- int rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_MPP, retbuf);
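plpar_hcall9() fills retbuf from the hypervisor's return registers, but callers previously parsed it even when rc indicated failure, reading uninitialized stack; zero-initializing the buffer and widening rc to long (hcall status codes are negative longs) closes both holes. A hedged sketch of the pattern (example_get_mpp is hypothetical):

#include <asm/hvcall.h>

long example_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
	long rc;

	rc = plpar_hcall9(H_GET_MPP, retbuf);
	mpp_data->entitled_mem = retbuf[0];	/* still 0 if the call failed */
	return rc;				/* negative H_* status intact */
}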
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 1c151d77e7..0ed56e5627 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -113,8 +113,8 @@ struct hvcall_ppp_data {
*/
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
- unsigned long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_PPP, retbuf);
@@ -193,7 +193,7 @@ static void parse_ppp_data(struct seq_file *m)
struct hvcall_ppp_data ppp_data;
struct device_node *root;
const __be32 *perf_level;
- int rc;
+ long rc;
rc = h_get_ppp(&ppp_data);
if (rc)
@@ -346,9 +346,13 @@ static int read_rtas_lpar_name(struct seq_file *m)
*/
static int read_dt_lpar_name(struct seq_file *m)
{
+ struct device_node *root = of_find_node_by_path("/");
const char *name;
+ int ret;
- if (of_property_read_string(of_root, "ibm,partition-name", &name))
+ ret = of_property_read_string(root, "ibm,partition-name", &name);
+ of_node_put(root);
+ if (ret)
return -ENOENT;
seq_printf(m, "partition_name=%s\n", name);
@@ -357,8 +361,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
- pr_err_once("Error can't get the LPAR name");
+ if (read_rtas_lpar_name(m))
+ read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
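Reading properties through the bare of_root pointer bypasses node refcounting; the conversion takes a counted reference with of_find_node_by_path("/") and drops it with of_node_put(). The same pattern in isolation (example_read_partition_name is hypothetical):

#include <linux/of.h>
#include <linux/seq_file.h>

static int example_read_partition_name(struct seq_file *m)
{
	struct device_node *root = of_find_node_by_path("/");
	const char *name;
	int ret;

	ret = of_property_read_string(root, "ibm,partition-name", &name);
	of_node_put(root);		/* balance of_find_node_by_path() */
	if (ret)
		return -ENOENT;

	seq_printf(m, "partition_name=%s\n", name);
	return 0;
}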
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 423ee1d5bd..6dfb55b52d 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -26,6 +26,7 @@ static int query_token, change_token;
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
#define RTAS_CHANGE_32MSI_FN 5
+#define RTAS_CHANGE_32MSIX_FN 6
/* RTAS Helpers */
@@ -41,7 +42,7 @@ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
seq_num = 1;
do {
if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
- func == RTAS_CHANGE_32MSI_FN)
+ func == RTAS_CHANGE_32MSI_FN || func == RTAS_CHANGE_32MSIX_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
@@ -406,8 +407,12 @@ again:
if (use_32bit_msi_hack && rc > 0)
rtas_hack_32bit_msi_gen2(pdev);
- } else
- rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
+ } else {
+ if (pdev->no_64bit_msi)
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSIX_FN, nvec);
+ else
+ rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
+ }
if (rc != nvec) {
if (nvec != nvec_in) {
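PAPR's ibm,change-msi RTAS call selects the interrupt flavour by function number; the hunk defines function 6 for 32-bit MSI-X and routes devices that cannot generate 64-bit message addresses to it. A sketch of that selection, assuming the file's static rtas_change_msi() helper (example_enable_msix is hypothetical):

static int example_enable_msix(struct pci_dn *pdn, struct pci_dev *pdev,
			       int nvec)
{
	/* Devices flagged no_64bit_msi get the 32-bit MSI-X variant. */
	u32 func = pdev->no_64bit_msi ? RTAS_CHANGE_32MSIX_FN
				      : RTAS_CHANGE_MSIX_FN;

	return rtas_change_msi(pdn, func, nvec);
}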
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 1a53e048ce..c233f9db03 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -1521,7 +1521,7 @@ err: kfree(p);
return rc;
}
-static int papr_scm_remove(struct platform_device *pdev)
+static void papr_scm_remove(struct platform_device *pdev)
{
struct papr_scm_priv *p = platform_get_drvdata(pdev);
@@ -1538,8 +1538,6 @@ static int papr_scm_remove(struct platform_device *pdev)
pdev->archdata.priv = NULL;
kfree(p->bus_desc.provider_name);
kfree(p);
-
- return 0;
}
static const struct of_device_id papr_scm_match[] = {
@@ -1550,7 +1548,7 @@ static const struct of_device_id papr_scm_match[] = {
static struct platform_driver papr_scm_driver = {
.probe = papr_scm_probe,
- .remove = papr_scm_remove,
+ .remove_new = papr_scm_remove,
.driver = {
.name = "papr_scm",
.of_match_table = papr_scm_match,
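papr_scm_remove() could only ever return 0, so it moves to the void-returning .remove_new hook introduced for the platform-driver transition (the int return from .remove was ignored by the driver core anyway). A minimal sketch with hypothetical example_* names:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv { int dummy; };

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	platform_set_drvdata(pdev, p);
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	kfree(platform_get_drvdata(pdev));	/* cannot fail, returns void */
}

static struct platform_driver example_driver = {
	.probe      = example_probe,
	.remove_new = example_remove,
	.driver     = { .name = "example" },
};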
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ecea85c74c..284a6fa04b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -1029,9 +1029,11 @@ static void __init pseries_add_hw_description(void)
return;
}
- if (of_property_read_bool(of_root, "ibm,powervm-partition") ||
- of_property_read_bool(of_root, "ibm,fw-net-version"))
+ dn = of_find_node_by_path("/");
+ if (of_property_read_bool(dn, "ibm,powervm-partition") ||
+ of_property_read_bool(dn, "ibm,fw-net-version"))
seq_buf_printf(&ppc_hw_desc, "hv:phyp ");
+ of_node_put(dn);
}
/*
@@ -1091,7 +1093,11 @@ static void pseries_power_off(void)
static int __init pSeries_probe(void)
{
- if (!of_node_is_type(of_root, "chrp"))
+ struct device_node *root = of_find_node_by_path("/");
+ bool ret = of_node_is_type(root, "chrp");
+
+ of_node_put(root);
+ if (!ret)
return 0;
/* Cell blades firmware claims to be chrp while it's not. Until this
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 2dc9cbc4bc..90ff85c879 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -991,18 +991,6 @@ static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);
-static struct attribute *vio_cmo_dev_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_devspec.attr,
- &dev_attr_modalias.attr,
- &dev_attr_cmo_entitled.attr,
- &dev_attr_cmo_allocated.attr,
- &dev_attr_cmo_desired.attr,
- &dev_attr_cmo_allocs_failed.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(vio_cmo_dev);
-
/* sysfs bus functions and data structures for CMO */
#define viobus_cmo_rd_attr(name) \
@@ -1062,11 +1050,7 @@ static struct attribute *vio_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(vio_bus);
-static void __init vio_cmo_sysfs_init(void)
-{
- vio_bus_type.dev_groups = vio_cmo_dev_groups;
- vio_bus_type.bus_groups = vio_bus_groups;
-}
+static void __init vio_cmo_sysfs_init(void) { }
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
@@ -1584,14 +1568,6 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
-static struct attribute *vio_dev_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_devspec.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(vio_dev);
-
void vio_unregister_device(struct vio_dev *viodev)
{
device_unregister(&viodev->dev);
@@ -1626,7 +1602,39 @@ static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type vio_bus_type = {
+#ifdef CONFIG_PPC_SMLPAR
+static struct attribute *vio_cmo_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_devspec.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_cmo_entitled.attr,
+ &dev_attr_cmo_allocated.attr,
+ &dev_attr_cmo_desired.attr,
+ &dev_attr_cmo_allocs_failed.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vio_cmo_dev);
+
+const struct bus_type vio_bus_type = {
+ .name = "vio",
+ .dev_groups = vio_cmo_dev_groups,
+ .bus_groups = vio_bus_groups,
+ .uevent = vio_hotplug,
+ .match = vio_bus_match,
+ .probe = vio_bus_probe,
+ .remove = vio_bus_remove,
+ .shutdown = vio_bus_shutdown,
+};
+#else /* CONFIG_PPC_SMLPAR */
+static struct attribute *vio_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_devspec.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vio_dev);
+
+const struct bus_type vio_bus_type = {
.name = "vio",
.dev_groups = vio_dev_groups,
.uevent = vio_hotplug,
@@ -1635,6 +1643,7 @@ struct bus_type vio_bus_type = {
.remove = vio_bus_remove,
.shutdown = vio_bus_shutdown,
};
+#endif /* CONFIG_PPC_SMLPAR */
/**
* vio_get_attribute: - get attribute for virtual device
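Because the constified bus_type can no longer have its dev_groups patched in at runtime by vio_cmo_sysfs_init(), the attribute arrays move next to two complete bus definitions selected by CONFIG_PPC_SMLPAR. A small sketch of the ATTRIBUTE_GROUPS() plumbing this relies on, with hypothetical example_* names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(name);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_dev);	/* generates example_dev_groups */

const struct bus_type example_bus_type = {
	.name       = "example",
	.dev_groups = example_dev_groups,	/* fixed at definition time */
};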
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 60a2c59251..e205135ae1 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -320,7 +320,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
return ret;
}
-static int fsl_of_msi_remove(struct platform_device *ofdev)
+static void fsl_of_msi_remove(struct platform_device *ofdev)
{
struct fsl_msi *msi = platform_get_drvdata(ofdev);
int virq, i;
@@ -343,8 +343,6 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
iounmap(msi->msi_regs);
kfree(msi);
-
- return 0;
}
static struct lock_class_key fsl_msi_irq_class;
@@ -605,7 +603,7 @@ static struct platform_driver fsl_of_msi_driver = {
.of_match_table = fsl_of_msi_ids,
},
.probe = fsl_of_msi_probe,
- .remove = fsl_of_msi_remove,
+ .remove_new = fsl_of_msi_remove,
};
static __init int fsl_of_msi_init(void)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index dabbdd3566..d94cf36b0f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -49,7 +49,7 @@
#define DBG(fmt...)
#endif
-struct bus_type mpic_subsys = {
+const struct bus_type mpic_subsys = {
.name = "mpic",
.dev_name = "mpic",
};
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index fcf8d15162..737f97fd67 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -173,7 +173,7 @@ out:
return rc;
}
-static int pmi_of_remove(struct platform_device *dev)
+static void pmi_of_remove(struct platform_device *dev)
{
struct pmi_handler *handler, *tmp;
@@ -189,13 +189,11 @@ static int pmi_of_remove(struct platform_device *dev)
kfree(data);
data = NULL;
-
- return 0;
}
static struct platform_driver pmi_of_platform_driver = {
.probe = pmi_of_probe,
- .remove = pmi_of_remove,
+ .remove_new = pmi_of_remove,
.driver = {
.name = "pmi",
.of_match_table = pmi_match,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index b3b94cd377..d79d6633f3 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -643,10 +643,8 @@ static int xmon_core(struct pt_regs *regs, volatile int fromipi)
touch_nmi_watchdog();
} else {
cmd = 1;
-#ifdef CONFIG_SMP
if (xmon_batch)
cmd = batch_cmds(regs);
-#endif
if (!locked_down && cmd)
cmd = cmds(regs);
if (locked_down || cmd != 0) {
@@ -1820,8 +1818,8 @@ static void print_bug_trap(struct pt_regs *regs)
const struct bug_entry *bug;
unsigned long addr;
- if (regs->msr & MSR_PR)
- return; /* not in kernel */
+ if (user_mode(regs))
+ return;
addr = regs->nip; /* address of trap instruction */
if (!is_kernel_addr(addr))
return;
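user_mode() is the generic way to ask whether a trap came from userspace; on powerpc it tests MSR_PR in regs->msr, so it is equivalent to the open-coded check it replaces here. A trivial sketch (example_from_user is hypothetical):

#include <linux/ptrace.h>

static bool example_from_user(struct pt_regs *regs)
{
	return user_mode(regs);		/* MSR_PR set: trap from userspace */
}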
@@ -3342,7 +3340,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (p4d_is_leaf(*p4dp)) {
+ if (p4d_leaf(*p4dp)) {
format_pte(p4dp, p4d_val(*p4dp));
return;
}
@@ -3356,7 +3354,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pud_is_leaf(*pudp)) {
+ if (pud_leaf(*pudp)) {
format_pte(pudp, pud_val(*pudp));
return;
}
@@ -3370,7 +3368,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pmd_is_leaf(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
format_pte(pmdp, pmd_val(*pmdp));
return;
}
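The xmon hunks track the tree-wide rename of the pXd_is_leaf() helpers to the generic pXd_leaf(); a leaf entry maps a huge page directly, so a walker must stop there instead of descending. A hedged sketch of such a walk (example_walk_to_pmd is hypothetical):

#include <linux/mm.h>
#include <linux/pgtable.h>

static pmd_t *example_walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp;

	if (p4d_none(*p4dp) || p4d_leaf(*p4dp))
		return NULL;		/* absent, or mapped as a huge page */

	pudp = pud_offset(p4dp, addr);
	if (pud_none(*pudp) || pud_leaf(*pudp))
		return NULL;

	return pmd_offset(pudp, addr);
}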