author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:35:05 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 17:39:31 +0000
commit		85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree		76267dbc9b9a130337be3640948fe397b04ac629 /arch/powerpc
parent		Adding upstream version 6.6.15. (diff)
download	linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz
		linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/Kconfig.debug | 14
-rw-r--r-- arch/powerpc/Makefile.postlink | 3
-rwxr-xr-x arch/powerpc/boot/install.sh | 16
-rw-r--r-- arch/powerpc/configs/44x/sam440ep_defconfig | 1
-rw-r--r-- arch/powerpc/configs/debug.config | 4
-rw-r--r-- arch/powerpc/configs/g5_defconfig | 4
-rw-r--r-- arch/powerpc/configs/hardening.config | 10
-rw-r--r-- arch/powerpc/configs/pmac32_defconfig | 2
-rw-r--r-- arch/powerpc/configs/ppc64e_defconfig | 4
-rw-r--r-- arch/powerpc/configs/ppc6xx_defconfig | 5
-rw-r--r-- arch/powerpc/crypto/aes-gcm-p10-glue.c | 2
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 21
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 83
-rw-r--r-- arch/powerpc/include/asm/book3s/32/tlbflush.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 37
-rw-r--r-- arch/powerpc/include/asm/book3s/pgtable.h | 33
-rw-r--r-- arch/powerpc/include/asm/code-patching.h | 1
-rw-r--r-- arch/powerpc/include/asm/cpm1.h | 5
-rw-r--r-- arch/powerpc/include/asm/cpm2.h | 4
-rw-r--r-- arch/powerpc/include/asm/fb.h | 13
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 16
-rw-r--r-- arch/powerpc/include/asm/guest-state-buffer.h | 995
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 30
-rw-r--r-- arch/powerpc/include/asm/imc-pmu.h | 16
-rw-r--r-- arch/powerpc/include/asm/io.h | 2
-rw-r--r-- arch/powerpc/include/asm/irq_work.h | 1
-rw-r--r-- arch/powerpc/include/asm/jump_label.h | 4
-rw-r--r-- arch/powerpc/include/asm/kexec.h | 8
-rw-r--r-- arch/powerpc/include/asm/kprobes.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 220
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 8
-rw-r--r-- arch/powerpc/include/asm/kvm_booke.h | 10
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 22
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 110
-rw-r--r-- arch/powerpc/include/asm/local.h | 12
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 13
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 4
-rw-r--r-- arch/powerpc/include/asm/mmzone.h | 8
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 201
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-40x.h | 18
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-44x.h | 20
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-85xx.h | 20
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pte-8xx.h | 92
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 120
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h | 217
-rw-r--r-- arch/powerpc/include/asm/nohash/pte-e500.h | 41
-rw-r--r-- arch/powerpc/include/asm/opal.h | 2
-rw-r--r-- arch/powerpc/include/asm/paravirt.h | 47
-rw-r--r-- arch/powerpc/include/asm/pci.h | 4
-rw-r--r-- arch/powerpc/include/asm/pgtable-masks.h | 32
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 41
-rw-r--r-- arch/powerpc/include/asm/plpar_wrappers.h | 267
-rw-r--r-- arch/powerpc/include/asm/ppc-pci.h | 10
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 17
-rw-r--r-- arch/powerpc/include/asm/reg.h | 2
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 14
-rw-r--r-- arch/powerpc/kernel/btext.c | 360
-rw-r--r-- arch/powerpc/kernel/cpu_setup_6xx.S | 20
-rw-r--r-- arch/powerpc/kernel/cpu_specs_e500mc.h | 3
-rw-r--r-- arch/powerpc/kernel/crash_dump.c | 12
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 4
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 6
-rw-r--r-- arch/powerpc/kernel/fadump.c | 2
-rw-r--r-- arch/powerpc/kernel/head_40x.S | 19
-rw-r--r-- arch/powerpc/kernel/head_44x.S | 40
-rw-r--r-- arch/powerpc/kernel/head_85xx.S | 12
-rw-r--r-- arch/powerpc/kernel/head_book3s_32.S | 63
-rw-r--r-- arch/powerpc/kernel/idle.c | 1
-rw-r--r-- arch/powerpc/kernel/interrupt_64.S | 4
-rw-r--r-- arch/powerpc/kernel/io.c | 12
-rw-r--r-- arch/powerpc/kernel/iommu.c | 86
-rw-r--r-- arch/powerpc/kernel/irq_64.c | 2
-rw-r--r-- arch/powerpc/kernel/paca.c | 2
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 3
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 2
-rw-r--r-- arch/powerpc/kernel/ptrace/ptrace.c | 5
-rw-r--r-- arch/powerpc/kernel/rtas-proc.c | 2
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 17
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 2
-rw-r--r-- arch/powerpc/kernel/signal.c | 5
-rw-r--r-- arch/powerpc/kernel/signal.h | 7
-rw-r--r-- arch/powerpc/kernel/smp.c | 6
-rw-r--r-- arch/powerpc/kernel/syscalls/syscall.tbl | 6
-rw-r--r-- arch/powerpc/kernel/traps.c | 6
-rw-r--r-- arch/powerpc/kexec/core.c | 2
-rw-r--r-- arch/powerpc/kexec/core_64.c | 4
-rw-r--r-- arch/powerpc/kexec/file_load_64.c | 14
-rw-r--r-- arch/powerpc/kvm/Makefile | 4
-rw-r--r-- arch/powerpc/kvm/book3s.c | 38
-rw-r--r-- arch/powerpc/kvm/book3s_64_entry.S | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 26
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 12
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 200
-rw-r--r-- arch/powerpc/kvm/book3s_hv.h | 80
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_hv_nested.c | 44
-rw-r--r-- arch/powerpc/kvm/book3s_hv_nestedv2.c | 1010
-rw-r--r-- arch/powerpc/kvm/book3s_hv_p9_entry.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv_ras.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xics.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv_uvmem.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 12
-rw-r--r-- arch/powerpc/kvm/book3s_xive_native.c | 2
-rw-r--r-- arch/powerpc/kvm/emulate_loadstore.c | 6
-rw-r--r-- arch/powerpc/kvm/guest-state-buffer.c | 621
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 76
-rw-r--r-- arch/powerpc/kvm/test-guest-state-buffer.c | 328
-rw-r--r-- arch/powerpc/lib/code-patching.c | 146
-rw-r--r-- arch/powerpc/lib/qspinlock.c | 122
-rw-r--r-- arch/powerpc/lib/sstep.c | 10
-rw-r--r-- arch/powerpc/mm/book3s32/hash_low.S | 32
-rw-r--r-- arch/powerpc/mm/book3s32/mmu.c | 6
-rw-r--r-- arch/powerpc/mm/book3s64/pgtable.c | 12
-rw-r--r-- arch/powerpc/mm/drmem.c | 2
-rw-r--r-- arch/powerpc/mm/fault.c | 11
-rw-r--r-- arch/powerpc/mm/init-common.c | 5
-rw-r--r-- arch/powerpc/mm/init_32.c | 1
-rw-r--r-- arch/powerpc/mm/ioremap.c | 6
-rw-r--r-- arch/powerpc/mm/kasan/init_32.c | 1
-rw-r--r-- arch/powerpc/mm/mem.c | 9
-rw-r--r-- arch/powerpc/mm/mmu_decl.h | 5
-rw-r--r-- arch/powerpc/mm/nohash/40x.c | 19
-rw-r--r-- arch/powerpc/mm/nohash/8xx.c | 2
-rw-r--r-- arch/powerpc/mm/nohash/book3e_pgtable.c | 2
-rw-r--r-- arch/powerpc/mm/nohash/e500.c | 6
-rw-r--r-- arch/powerpc/mm/nohash/e500_hugetlbpage.c | 3
-rw-r--r-- arch/powerpc/mm/nohash/kaslr_booke.c | 2
-rw-r--r-- arch/powerpc/mm/pgtable.c | 26
-rw-r--r-- arch/powerpc/mm/ptdump/8xx.c | 5
-rw-r--r-- arch/powerpc/mm/ptdump/shared.c | 14
-rw-r--r-- arch/powerpc/net/bpf_jit.h | 18
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 149
-rw-r--r-- arch/powerpc/net/bpf_jit_comp32.c | 15
-rw-r--r-- arch/powerpc/net/bpf_jit_comp64.c | 10
-rw-r--r-- arch/powerpc/perf/hv-24x7.c | 2
-rw-r--r-- arch/powerpc/perf/imc-pmu.c | 11
-rw-r--r-- arch/powerpc/perf/power6-pmu.c | 46
-rw-r--r-- arch/powerpc/platforms/4xx/soc.c | 2
-rw-r--r-- arch/powerpc/platforms/83xx/misc.c | 2
-rw-r--r-- arch/powerpc/platforms/8xx/cpm1.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/spufs/coredump.c | 11
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/powermac/feature.c | 3
-rw-r--r-- arch/powerpc/platforms/powermac/low_i2c.c | 4
-rw-r--r-- arch/powerpc/platforms/powermac/smp.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal-fadump.h | 2
-rw-r--r-- arch/powerpc/platforms/pseries/Kconfig | 6
-rw-r--r-- arch/powerpc/platforms/pseries/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-memory.c | 3
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 16
-rw-r--r-- arch/powerpc/platforms/pseries/mobility.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/pci_dlpar.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/plpks.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/plpks_sed_ops.c | 131
-rw-r--r-- arch/powerpc/platforms/pseries/rtas-work-area.c | 1
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.c | 10
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 2
-rwxr-xr-x arch/powerpc/tools/gcc-check-mprofile-kernel.sh | 11
164 files changed, 5327 insertions, 1727 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2fe51e0ad6..dbb6c44d59 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -237,6 +237,7 @@ config PPC
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2a54fadbea..ea4033abc0 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -82,6 +82,18 @@ config MSI_BITMAP_SELFTEST
bool "Run self-tests of the MSI bitmap code"
depends on DEBUG_KERNEL
+config GUEST_STATE_BUFFER_TEST
+ def_tristate n
+ prompt "Enable Guest State Buffer unit tests"
+ depends on KUNIT
+ depends on KVM_BOOK3S_HV_POSSIBLE
+ default KUNIT_ALL_TESTS
+ help
+ The Guest State Buffer is a data format specified in the PAPR.
+ It is used by hcalls to communicate the state of L2 guests between
+ the L1 and L0 hypervisors. Enable unit tests for the library
+ used to create and use guest state buffers.
+
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
depends on PPC64
@@ -147,6 +159,8 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
depends on PPC_BOOK3S
+ select FONT_SUN8x16
+ select FONT_SUPPORT
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 1f860b3c9b..ae5a4256b0 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -35,9 +35,6 @@ ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
endif
-%.ko: FORCE
- @true
-
clean:
rm -f .tmp_symbols.txt
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 461902c8a4..101fcb397a 100755
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -21,13 +21,17 @@ set -e
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
-if [ -f $4/$image_name ]; then
- mv $4/$image_name $4/$image_name.old
+
+echo "Warning: '${INSTALLKERNEL}' command not available... Copying" \
+ "directly to $4/$image_name-$1" >&2
+
+if [ -f $4/$image_name-$1 ]; then
+ mv $4/$image_name-$1 $4/$image_name-$1.old
fi
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System-$1.old
fi
-cat $2 > $4/$image_name
-cp $3 $4/System.map
+cat $2 > $4/$image_name-$1
+cp $3 $4/System.map-$1
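With this change the PowerPC install step versions its outputs like other architectures: the boot image now lands at $4/$image_name-$1 (e.g. zImage-6.7.7 when $1 is the kernel release) and the map file at $4/System.map-$1, with any pre-existing copies rotated to .old first.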
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 51499ee636..2479ab62d1 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -78,7 +78,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/debug.config b/arch/powerpc/configs/debug.config
index a14ae1f20d..bcc1fcf25e 100644
--- a/arch/powerpc/configs/debug.config
+++ b/arch/powerpc/configs/debug.config
@@ -1 +1,5 @@
+CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG=y
+CONFIG_PPC_IRQ_SOFT_MASK_DEBUG=y
+CONFIG_PPC_KUAP_DEBUG=y
+CONFIG_PPC_RFI_SRR_DEBUG=y
CONFIG_SCOM_DEBUGFS=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 71d9d112c0..9215bed532 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -202,10 +202,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
CONFIG_FS_DAX=y
diff --git a/arch/powerpc/configs/hardening.config b/arch/powerpc/configs/hardening.config
new file mode 100644
index 0000000000..4e9bba327e
--- /dev/null
+++ b/arch/powerpc/configs/hardening.config
@@ -0,0 +1,10 @@
+# PowerPC specific hardening options
+
+# Block kernel from unexpectedly reading userspace memory.
+CONFIG_PPC_KUAP=y
+
+# Attack surface reduction.
+# CONFIG_SCOM_DEBUGFS is not set
+
+# Disable internal kernel debugger.
+# CONFIG_XMON is not set
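Like debug.config above, hardening.config is a merge fragment rather than a complete configuration. Assuming the standard kconfig fragment workflow applies to arch fragments here, it would be layered on top of a base config with something like "make ARCH=powerpc ppc64_defconfig hardening.config".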
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index a205da9ee5..57ded82c28 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -138,7 +138,6 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_ADB=y
-CONFIG_ADB_CUDA=y
CONFIG_ADB_PMU=y
CONFIG_ADB_PMU_LED=y
CONFIG_ADB_PMU_LED_DISK=y
@@ -181,6 +180,7 @@ CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_SERIAL_PMACZILOG_CONSOLE=y
CONFIG_NVRAM=y
CONFIG_I2C_CHARDEV=m
+CONFIG_POWER_RESET=y
CONFIG_APM_POWER=y
CONFIG_BATTERY_PMU=y
CONFIG_HWMON=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 624c371ffc..4c05f4e4d5 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -175,10 +175,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=y
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index eaf3273372..f279703425 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -954,11 +954,6 @@ CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_JBD2_DEBUG=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
index 4b6e899895..f62ee54076 100644
--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -37,7 +37,7 @@ asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
-asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
+asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
unsigned char *aad, unsigned int alen);
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 7e0f032291..671ecc6711 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -233,35 +233,24 @@ static inline int arch_test_and_change_bit(unsigned long nr,
return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
-#ifdef CONFIG_PPC64
-static inline unsigned long
-clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *p)
{
unsigned long old, t;
- unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
- unsigned long mask = BIT_MASK(nr);
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
"1:" PPC_LLARX "%0,0,%3,0\n"
- "andc %1,%0,%2\n"
+ "xor %1,%0,%2\n"
PPC_STLCX "%1,0,%3\n"
"bne- 1b\n"
: "=&r" (old), "=&r" (t)
: "r" (mask), "r" (p)
: "cc", "memory");
- return old;
+ return (old & BIT_MASK(7)) != 0;
}
-
-/*
- * This is a special function for mm/filemap.c
- * Bit 7 corresponds to PG_waiters.
- */
-#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \
- (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
-
-#endif /* CONFIG_PPC64 */
+#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
#include <asm-generic/bitops/non-atomic.h>
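The new hook replaces the PPC64-only clear_bit_unlock_return_word()/arch_clear_bit_unlock_is_negative_byte() pair with a generic xor-based primitive that is now built on 32-bit too. Ignoring atomicity and the release barrier, a C model of the semantics the larx/stcx. loop above implements (illustrative only, not part of the patch):
	/* Model: atomically old = *p; *p = old ^ mask; report bit 7 of old.
	 * Bit 7 corresponds to PG_waiters in the page-flags byte this is
	 * used on by mm/filemap.c. */
	static bool xor_unlock_is_negative_byte_model(unsigned long mask,
						      unsigned long *p)
	{
		unsigned long old = *p;

		*p = old ^ mask;
		return (old & BIT_MASK(7)) != 0;
	}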
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 9b13eb14e2..52971ee307 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -20,7 +20,7 @@
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_READ 0x004 /* software: read access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
@@ -28,7 +28,7 @@
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_WRITE 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT
@@ -42,26 +42,13 @@
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
-/* We borrow the _PAGE_USER bit to store the exclusive marker in swap PTEs. */
-#define _PAGE_SWP_EXCLUSIVE _PAGE_USER
+/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_READ
/* And here we include common definitions */
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
-#ifndef __ASSEMBLY__
-
-static inline bool pte_user(pte_t pte)
-{
- return pte_val(pte) & _PAGE_USER;
-}
-#endif /* __ASSEMBLY__ */
-
/*
* Location of the PFN in the PTE. Most 32-bit platforms use the same
* as _PAGE_SHIFT here (ie, naturally aligned).
@@ -97,20 +84,7 @@ static inline bool pte_user(pte_t pte)
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
-/*
- * Permission masks used to generate the __P and __S table.
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now.
- */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -170,7 +144,14 @@ void unmap_kernel_page(unsigned long va);
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -224,9 +205,6 @@ void unmap_kernel_page(unsigned long va);
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
- (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
@@ -343,7 +321,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -402,8 +380,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
}
/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
-static inline int pte_read(pte_t pte) { return 1; }
+static inline bool pte_read(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_READ);
+}
+
+static inline bool pte_write(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_WRITE);
+}
+
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
@@ -438,10 +424,10 @@ static inline bool pte_ci(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
- * A read-only access is controlled by _PAGE_USER bit.
- * We have _PAGE_READ set for WRITE and EXECUTE
+ * A read-only access is controlled by _PAGE_READ bit.
+ * We have _PAGE_READ set for WRITE
*/
- if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ if (!pte_present(pte) || !pte_read(pte))
return false;
if (write && !pte_write(pte))
@@ -465,7 +451,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_RW);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
static inline pte_t pte_exprotect(pte_t pte)
@@ -495,6 +481,9 @@ static inline pte_t pte_mkpte(pte_t pte)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
+ /*
+ * write implies read, hence set both
+ */
return __pte(pte_val(pte) | _PAGE_RW);
}
@@ -518,16 +507,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_USER);
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_USER);
-}
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index 4be5729081..e43534da52 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -80,7 +80,7 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
unsigned long vmaddr, int psize)
{
- BUILD_BUG();
+ flush_range(mm, vmaddr, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5c497c862d..cb77eddca5 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,6 +17,10 @@
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
+#define _PAGE_NA _PAGE_PRIVILEGED
+#define _PAGE_NAX _PAGE_EXEC
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED 0x00008 /* kernel access only */
@@ -136,23 +140,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now (we could make write-only
- * pages on BookE but we don't bother for now). Execute permission control is
- * possible on platforms that define _PAGE_EXEC
- */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
-#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -316,6 +304,7 @@ extern unsigned long pci_io_base;
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
#ifndef __ASSEMBLY__
@@ -629,16 +618,6 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
}
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
-}
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
-}
-
/*
* This is potentially called with a pmd as the argument, in which case it's not
* safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
@@ -647,7 +626,7 @@ static inline pte_t pte_mkuser(pte_t pte)
*/
static inline int pte_devmap(pte_t pte)
{
- u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+ __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
return (pte_raw(pte) & mask) == mask;
}
@@ -1014,8 +993,6 @@ static inline pmd_t *pud_pgtable(pud_t pud)
return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
}
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
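Both Book3S pgtable headers above now pull their user protection masks from the new arch/powerpc/include/asm/pgtable-masks.h (32 lines in the diffstat; its body is not shown in this section). Judging from the _PAGE_NA/_PAGE_RO/_PAGE_ROX/_PAGE_RW aliases added above, the shared header presumably generates the PAGE_* constants along these lines (a sketch inferred from those aliases, not the verbatim file):
	/* Sketch of asm/pgtable-masks.h, inferred from the aliases above. */
	#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_NA)
	#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
	#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RWX)
	#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_RO)
	#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_ROX)
	#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_RO)
	#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_ROX)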
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 3b7bd36a23..f42d68c6b3 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,37 +8,4 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#ifndef __ASSEMBLY__
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, pte_t entry, int dirty);
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, unsigned int nr)
-{
- if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
- return;
- if (radix_enabled())
- return;
- __update_mmu_cache(vma, address, ptep);
-}
-
-#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 3f881548fb..0e29ccf903 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -74,6 +74,7 @@ int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
static inline unsigned long patch_site_addr(s32 *site)
{
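patch_instructions() is the new batch counterpart to patch_instruction(): it patches len bytes at addr, either copying the whole sequence from code or, when repeat_instr is true, replicating a single instruction across the range (the diffstat shows lib/code-patching.c and the BPF JIT picking it up). A hedged caller-side sketch; the variable names are illustrative:
	/* Copy a generated sequence into its final executable location. */
	err = patch_instructions(dst, jit_code, size, false);
	/* Or fill a region with one repeated instruction, e.g. traps. */
	u32 trap = PPC_RAW_TRAP();
	err = patch_instructions(area, &trap, area_size, true);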
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 3bdd74739c..e3c6969853 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -49,11 +49,6 @@
*/
extern cpm8xx_t __iomem *cpmp; /* Pointer to comm processor */
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-#define cpm_dpram_phys cpm_muram_dma
-
extern void cpm_setbrg(uint brg, uint rate);
extern void __init cpm_load_patch(cpm8xx_t *cp);
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 249d43cc64..a22acc36eb 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -87,10 +87,6 @@
*/
extern cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor */
-#define cpm_dpalloc cpm_muram_alloc
-#define cpm_dpfree cpm_muram_free
-#define cpm_dpram_addr cpm_muram_addr
-
extern void cpm2_reset(void);
/* Baud rate generators.
diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h
index 5f1a2e5f76..c0c5d1df7a 100644
--- a/arch/powerpc/include/asm/fb.h
+++ b/arch/powerpc/include/asm/fb.h
@@ -2,18 +2,15 @@
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
-#include <linux/fs.h>
-
#include <asm/page.h>
-static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
- unsigned long off)
+static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long offset)
{
- vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
+ return __phys_mem_access_prot(PHYS_PFN(offset), vm_end - vm_start, prot);
}
-#define fb_pgprotect fb_pgprotect
+#define pgprot_framebuffer pgprot_framebuffer
#include <asm-generic/fb.h>
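Note the interface change here: fb_pgprotect() used to mutate vma->vm_page_prot in place, while pgprot_framebuffer() is pure, taking and returning a pgprot_t. Presumably the generic fbdev mmap path now applies it along these lines (an assumption about the caller, which is outside this diff):
	/* Hypothetical caller in the fbdev core's mmap path. */
	vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot,
					       vma->vm_start, vma->vm_end,
					       offset);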
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index a832aeafe5..f9068dd8df 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -23,18 +23,6 @@
#include <asm/kmap_size.h>
#endif
-#ifdef CONFIG_PPC64
-#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
-#else
-#define FIXADDR_SIZE 0
-#ifdef CONFIG_KASAN
-#include <asm/kasan.h>
-#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
-#else
-#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
-#endif
-#endif
-
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
@@ -119,5 +107,9 @@ static inline void __set_fixmap(enum fixed_addresses idx,
#define __early_set_fixmap __set_fixmap
+#ifdef CONFIG_PPC_8xx
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h
new file mode 100644
index 0000000000..808149f315
--- /dev/null
+++ b/arch/powerpc/include/asm/guest-state-buffer.h
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interface based on include/net/netlink.h
+ */
+#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H
+#define _ASM_POWERPC_GUEST_STATE_BUFFER_H
+
+#include "asm/hvcall.h"
+#include <linux/gfp.h>
+#include <linux/bitmap.h>
+#include <asm/plpar_wrappers.h>
+
+/**************************************************************************
+ * Guest State Buffer Constants
+ **************************************************************************/
+/* Element without a value and any length */
+#define KVMPPC_GSID_BLANK 0x0000
+/* Size required for the L0's internal VCPU representation */
+#define KVMPPC_GSID_HOST_STATE_SIZE 0x0001
+ /* Minimum size for the H_GUEST_RUN_VCPU output buffer */
+#define KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE 0x0002
+ /* "Logical" PVR value as defined in the PAPR */
+#define KVMPPC_GSID_LOGICAL_PVR 0x0003
+ /* L0 relative timebase offset */
+#define KVMPPC_GSID_TB_OFFSET 0x0004
+ /* Partition Scoped Page Table Info */
+#define KVMPPC_GSID_PARTITION_TABLE 0x0005
+ /* Process Table Info */
+#define KVMPPC_GSID_PROCESS_TABLE 0x0006
+
+/* H_GUEST_RUN_VCPU input buffer Info */
+#define KVMPPC_GSID_RUN_INPUT 0x0C00
+/* H_GUEST_RUN_VCPU output buffer Info */
+#define KVMPPC_GSID_RUN_OUTPUT 0x0C01
+#define KVMPPC_GSID_VPA 0x0C02
+
+#define KVMPPC_GSID_GPR(x) (0x1000 + (x))
+#define KVMPPC_GSID_HDEC_EXPIRY_TB 0x1020
+#define KVMPPC_GSID_NIA 0x1021
+#define KVMPPC_GSID_MSR 0x1022
+#define KVMPPC_GSID_LR 0x1023
+#define KVMPPC_GSID_XER 0x1024
+#define KVMPPC_GSID_CTR 0x1025
+#define KVMPPC_GSID_CFAR 0x1026
+#define KVMPPC_GSID_SRR0 0x1027
+#define KVMPPC_GSID_SRR1 0x1028
+#define KVMPPC_GSID_DAR 0x1029
+#define KVMPPC_GSID_DEC_EXPIRY_TB 0x102A
+#define KVMPPC_GSID_VTB 0x102B
+#define KVMPPC_GSID_LPCR 0x102C
+#define KVMPPC_GSID_HFSCR 0x102D
+#define KVMPPC_GSID_FSCR 0x102E
+#define KVMPPC_GSID_FPSCR 0x102F
+#define KVMPPC_GSID_DAWR0 0x1030
+#define KVMPPC_GSID_DAWR1 0x1031
+#define KVMPPC_GSID_CIABR 0x1032
+#define KVMPPC_GSID_PURR 0x1033
+#define KVMPPC_GSID_SPURR 0x1034
+#define KVMPPC_GSID_IC 0x1035
+#define KVMPPC_GSID_SPRG0 0x1036
+#define KVMPPC_GSID_SPRG1 0x1037
+#define KVMPPC_GSID_SPRG2 0x1038
+#define KVMPPC_GSID_SPRG3 0x1039
+#define KVMPPC_GSID_PPR 0x103A
+#define KVMPPC_GSID_MMCR(x) (0x103B + (x))
+#define KVMPPC_GSID_MMCRA 0x103F
+#define KVMPPC_GSID_SIER(x) (0x1040 + (x))
+#define KVMPPC_GSID_BESCR 0x1043
+#define KVMPPC_GSID_EBBHR 0x1044
+#define KVMPPC_GSID_EBBRR 0x1045
+#define KVMPPC_GSID_AMR 0x1046
+#define KVMPPC_GSID_IAMR 0x1047
+#define KVMPPC_GSID_AMOR 0x1048
+#define KVMPPC_GSID_UAMOR 0x1049
+#define KVMPPC_GSID_SDAR 0x104A
+#define KVMPPC_GSID_SIAR 0x104B
+#define KVMPPC_GSID_DSCR 0x104C
+#define KVMPPC_GSID_TAR 0x104D
+#define KVMPPC_GSID_DEXCR 0x104E
+#define KVMPPC_GSID_HDEXCR 0x104F
+#define KVMPPC_GSID_HASHKEYR 0x1050
+#define KVMPPC_GSID_HASHPKEYR 0x1051
+#define KVMPPC_GSID_CTRL 0x1052
+
+#define KVMPPC_GSID_CR 0x2000
+#define KVMPPC_GSID_PIDR 0x2001
+#define KVMPPC_GSID_DSISR 0x2002
+#define KVMPPC_GSID_VSCR 0x2003
+#define KVMPPC_GSID_VRSAVE 0x2004
+#define KVMPPC_GSID_DAWRX0 0x2005
+#define KVMPPC_GSID_DAWRX1 0x2006
+#define KVMPPC_GSID_PMC(x) (0x2007 + (x))
+#define KVMPPC_GSID_WORT 0x200D
+#define KVMPPC_GSID_PSPB 0x200E
+
+#define KVMPPC_GSID_VSRS(x) (0x3000 + (x))
+
+#define KVMPPC_GSID_HDAR 0xF000
+#define KVMPPC_GSID_HDSISR 0xF001
+#define KVMPPC_GSID_HEIR 0xF002
+#define KVMPPC_GSID_ASDR 0xF003
+
+#define KVMPPC_GSE_GUESTWIDE_START KVMPPC_GSID_BLANK
+#define KVMPPC_GSE_GUESTWIDE_END KVMPPC_GSID_PROCESS_TABLE
+#define KVMPPC_GSE_GUESTWIDE_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1)
+
+#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT
+#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA
+#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
+
+#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
+#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
+#define KVMPPC_GSE_DW_REGS_COUNT \
+ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)
+
+#define KVMPPC_GSE_W_REGS_START KVMPPC_GSID_CR
+#define KVMPPC_GSE_W_REGS_END KVMPPC_GSID_PSPB
+#define KVMPPC_GSE_W_REGS_COUNT \
+ (KVMPPC_GSE_W_REGS_END - KVMPPC_GSE_W_REGS_START + 1)
+
+#define KVMPPC_GSE_VSRS_START KVMPPC_GSID_VSRS(0)
+#define KVMPPC_GSE_VSRS_END KVMPPC_GSID_VSRS(63)
+#define KVMPPC_GSE_VSRS_COUNT (KVMPPC_GSE_VSRS_END - KVMPPC_GSE_VSRS_START + 1)
+
+#define KVMPPC_GSE_INTR_REGS_START KVMPPC_GSID_HDAR
+#define KVMPPC_GSE_INTR_REGS_END KVMPPC_GSID_ASDR
+#define KVMPPC_GSE_INTR_REGS_COUNT \
+ (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1)
+
+#define KVMPPC_GSE_IDEN_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
+ KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \
+ KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT)
+
+/**
+ * Ranges of guest state buffer elements
+ */
+enum {
+ KVMPPC_GS_CLASS_GUESTWIDE = 0x01,
+ KVMPPC_GS_CLASS_META = 0x02,
+ KVMPPC_GS_CLASS_DWORD_REG = 0x04,
+ KVMPPC_GS_CLASS_WORD_REG = 0x08,
+ KVMPPC_GS_CLASS_VECTOR = 0x10,
+ KVMPPC_GS_CLASS_INTR = 0x20,
+};
+
+/**
+ * Types of guest state buffer elements
+ */
+enum {
+ KVMPPC_GSE_BE32,
+ KVMPPC_GSE_BE64,
+ KVMPPC_GSE_VEC128,
+ KVMPPC_GSE_PARTITION_TABLE,
+ KVMPPC_GSE_PROCESS_TABLE,
+ KVMPPC_GSE_BUFFER,
+ __KVMPPC_GSE_TYPE_MAX,
+};
+
+/**
+ * Flags for guest state elements
+ */
+enum {
+ KVMPPC_GS_FLAGS_WIDE = 0x01,
+};
+
+/**
+ * struct kvmppc_gs_part_table - deserialized partition table information
+ * element
+ * @address: start of the partition table
+ * @ea_bits: number of bits in the effective address
+ * @gpd_size: root page directory size
+ */
+struct kvmppc_gs_part_table {
+ u64 address;
+ u64 ea_bits;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_proc_table - deserialized process table information element
+ * @address: start of the process table
+ * @gpd_size: process table size
+ */
+struct kvmppc_gs_proc_table {
+ u64 address;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_buff_info - deserialized meta guest state buffer information
+ * @address: start of the guest state buffer
+ * @size: size of the guest state buffer
+ */
+struct kvmppc_gs_buff_info {
+ u64 address;
+ u64 size;
+};
+
+/**
+ * struct kvmppc_gs_header - serialized guest state buffer header
+ * @nelems: count of guest state elements in the buffer
+ * @data: start of the stream of elements in the buffer
+ */
+struct kvmppc_gs_header {
+ __be32 nelems;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_elem - serialized guest state buffer element
+ * @iden: Guest State ID
+ * @len: length of data
+ * @data: the guest state buffer element's value
+ */
+struct kvmppc_gs_elem {
+ __be16 iden;
+ __be16 len;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_buff - a guest state buffer with metadata.
+ * @capacity: total length of the buffer
+ * @len: current length of the elements and header
+ * @guest_id: guest id associated with the buffer
+ * @vcpu_id: vcpu_id associated with the buffer
+ * @hdr: the serialised guest state buffer
+ */
+struct kvmppc_gs_buff {
+ size_t capacity;
+ size_t len;
+ unsigned long guest_id;
+ unsigned long vcpu_id;
+ struct kvmppc_gs_header *hdr;
+};
+
+/**
+ * struct kvmppc_gs_bitmap - a bitmap for element ids
+ * @bitmap: a bitmap large enough for all Guest State IDs
+ */
+struct kvmppc_gs_bitmap {
+ /* private: */
+ DECLARE_BITMAP(bitmap, KVMPPC_GSE_IDEN_COUNT);
+};
+
+/**
+ * struct kvmppc_gs_parser - a map of element ids to locations in a buffer
+ * @iterator: bitmap used for iterating
+ * @gses: contains the pointers to elements
+ *
+ * A guest state parser is used for deserialising a guest state buffer.
+ * Given a buffer, it then allows looking up guest state elements using
+ * a guest state id.
+ */
+struct kvmppc_gs_parser {
+ /* private: */
+ struct kvmppc_gs_bitmap iterator;
+ struct kvmppc_gs_elem *gses[KVMPPC_GSE_IDEN_COUNT];
+};
+
+enum {
+ GSM_GUEST_WIDE = 0x1,
+ GSM_SEND = 0x2,
+ GSM_RECEIVE = 0x4,
+ GSM_GSB_OWNER = 0x8,
+};
+
+struct kvmppc_gs_msg;
+
+/**
+ * struct kvmppc_gs_msg_ops - guest state message behavior
+ * @get_size: maximum size required for the message data
+ * @fill_info: serializes to the guest state buffer format
+ * @refresh_info: deserializes from the guest state buffer format
+ */
+struct kvmppc_gs_msg_ops {
+ size_t (*get_size)(struct kvmppc_gs_msg *gsm);
+ int (*fill_info)(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm);
+ int (*refresh_info)(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+};
+
+/**
+ * struct kvmppc_gs_msg - a guest state message
+ * @bitmap: the guest state ids that should be included
+ * @ops: modify message behavior for reading and writing to buffers
+ * @flags: guest wide or thread wide
+ * @data: location where buffer data will be written to or from.
+ *
+ * A guest state message allows flexibility in sending and receiving data
+ * in the guest state buffer format.
+ */
+struct kvmppc_gs_msg {
+ struct kvmppc_gs_bitmap bitmap;
+ struct kvmppc_gs_msg_ops *ops;
+ unsigned long flags;
+ void *data;
+};
+
+/**************************************************************************
+ * Guest State IDs
+ **************************************************************************/
+
+u16 kvmppc_gsid_size(u16 iden);
+unsigned long kvmppc_gsid_flags(u16 iden);
+u64 kvmppc_gsid_mask(u16 iden);
+
+/**************************************************************************
+ * Guest State Buffers
+ **************************************************************************/
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags);
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb);
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size);
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags);
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags);
+
+/**
+ * kvmppc_gsb_header() - the header of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the buffer header.
+ */
+static inline struct kvmppc_gs_header *
+kvmppc_gsb_header(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->hdr;
+}
+
+/**
+ * kvmppc_gsb_data() - the elements of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the first element of the buffer data.
+ */
+static inline struct kvmppc_gs_elem *kvmppc_gsb_data(struct kvmppc_gs_buff *gsb)
+{
+ return (struct kvmppc_gs_elem *)kvmppc_gsb_header(gsb)->data;
+}
+
+/**
+ * kvmppc_gsb_len() - the current length of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the length including the header of a buffer.
+ */
+static inline size_t kvmppc_gsb_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len;
+}
+
+/**
+ * kvmppc_gsb_capacity() - the capacity of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer.
+ */
+static inline size_t kvmppc_gsb_capacity(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity;
+}
+
+/**
+ * kvmppc_gsb_paddress() - the physical address of buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the physical address of the buffer.
+ */
+static inline u64 kvmppc_gsb_paddress(struct kvmppc_gs_buff *gsb)
+{
+ return __pa(kvmppc_gsb_header(gsb));
+}
+
+/**
+ * kvmppc_gsb_nelems() - the number of elements in a buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the number of elements in a buffer
+ */
+static inline u32 kvmppc_gsb_nelems(struct kvmppc_gs_buff *gsb)
+{
+ return be32_to_cpu(kvmppc_gsb_header(gsb)->nelems);
+}
+
+/**
+ * kvmppc_gsb_reset() - empty a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Reset the number of elements and length of buffer to empty.
+ */
+static inline void kvmppc_gsb_reset(struct kvmppc_gs_buff *gsb)
+{
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(0);
+ gsb->len = sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_len() - the length of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the length of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_cap() - the capacity of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_cap(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_for_each_elem - iterate over the elements in a buffer
+ * @i: loop counter
+ * @pos: set to current element
+ * @gsb: guest state buffer
+ * @rem: initialized to buffer capacity, holds bytes currently remaining in
+ * stream
+ */
+#define kvmppc_gsb_for_each_elem(i, pos, gsb, rem) \
+ kvmppc_gse_for_each_elem(i, kvmppc_gsb_nelems(gsb), pos, \
+ kvmppc_gsb_data(gsb), \
+ kvmppc_gsb_data_cap(gsb), rem)
+
+/**************************************************************************
+ * Guest State Elements
+ **************************************************************************/
+
+/**
+ * kvmppc_gse_iden() - guest state ID of element
+ * @gse: guest state element
+ *
+ * Return the guest state ID in host endianness.
+ */
+static inline u16 kvmppc_gse_iden(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->iden);
+}
+
+/**
+ * kvmppc_gse_len() - length of guest state element data
+ * @gse: guest state element
+ *
+ * Returns the length of guest state element data
+ */
+static inline u16 kvmppc_gse_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len);
+}
+
+/**
+ * kvmppc_gse_total_len() - total length of guest state element
+ * @gse: guest state element
+ *
+ * Returns the length of the data plus the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len) + sizeof(*gse);
+}
+
+/**
+ * kvmppc_gse_total_size() - space needed for a given data length
+ * @size: data length
+ *
+ * Returns size plus the space needed for the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_size(u16 size)
+{
+ return sizeof(struct kvmppc_gs_elem) + size;
+}
+
+/**
+ * kvmppc_gse_data() - pointer to data of a guest state element
+ * @gse: guest state element
+ *
+ * Returns a pointer to the beginning of guest state element data.
+ */
+static inline void *kvmppc_gse_data(const struct kvmppc_gs_elem *gse)
+{
+ return (void *)gse->data;
+}
+
+/**
+ * kvmppc_gse_ok() - checks space exists for guest state element
+ * @gse: guest state element
+ * @remaining: bytes of space remaining
+ *
+ * Returns true if the guest state element can fit in remaining space.
+ */
+static inline bool kvmppc_gse_ok(const struct kvmppc_gs_elem *gse,
+ int remaining)
+{
+ return remaining >= kvmppc_gse_total_len(gse);
+}
+
+/**
+ * kvmppc_gse_next() - iterate to the next guest state element in a stream
+ * @gse: stream of guest state elements
+ * @remaining: length of the guest element stream
+ *
+ * Returns the next guest state element in a stream of elements. The length of
+ * the stream is updated in remaining.
+ */
+static inline struct kvmppc_gs_elem *
+kvmppc_gse_next(const struct kvmppc_gs_elem *gse, int *remaining)
+{
+ int len = sizeof(*gse) + kvmppc_gse_len(gse);
+
+ *remaining -= len;
+ return (struct kvmppc_gs_elem *)(gse->data + kvmppc_gse_len(gse));
+}
+
+/**
+ * kvmppc_gse_for_each_elem - iterate over a stream of guest state elements
+ * @i: loop counter
+ * @max: number of elements
+ * @pos: set to current element
+ * @head: head of elements
+ * @len: length of the stream
+ * @rem: initialized to len, holds bytes currently remaining in the stream
+ */
+#define kvmppc_gse_for_each_elem(i, max, pos, head, len, rem) \
+ for (i = 0, pos = head, rem = len; kvmppc_gse_ok(pos, rem) && i < max; \
+ pos = kvmppc_gse_next(pos, &(rem)), i++)
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gse_put_be32() - add a be32 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be32(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be32 val)
+{
+ __be32 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be32), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u32() - add a host endian 32bit int guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u32(struct kvmppc_gs_buff *gsb, u16 iden,
+ u32 val)
+{
+ __be32 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be32(val);
+ return kvmppc_gse_put_be32(gsb, iden, tmp);
+}
+
+/**
+ * kvmppc_gse_put_be64() - add a be64 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be64(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be64 val)
+{
+ __be64 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be64), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u64() - add a host endian 64bit guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u64(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ __be64 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be64(val);
+ return kvmppc_gse_put_be64(gsb, iden, tmp);
+}
+
+/**
+ * __kvmppc_gse_put_reg() - add a register type guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ *
+ * Adds a register type guest state element. The guest state ID determines
+ * the length of the element. Any bits in the value that the guest state ID
+ * does not permit to be set are cleared.
+ */
+static inline int __kvmppc_gse_put_reg(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ val &= kvmppc_gsid_mask(iden);
+ if (kvmppc_gsid_size(iden) == sizeof(u64))
+ return kvmppc_gse_put_u64(gsb, iden, val);
+
+ if (kvmppc_gsid_size(iden) == sizeof(u32)) {
+ u32 tmp;
+
+ tmp = (u32)val;
+ if (tmp != val)
+ return -EINVAL;
+
+ return kvmppc_gse_put_u32(gsb, iden, tmp);
+ }
+ return -EINVAL;
+}
+
+/**
+ * kvmppc_gse_put_vector128() - add a vector guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: 16 byte vector value
+ */
+static inline int kvmppc_gse_put_vector128(struct kvmppc_gs_buff *gsb, u16 iden,
+ vector128 *val)
+{
+ __be64 tmp[2] = { 0 };
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+
+ u.v = *val;
+ tmp[0] = cpu_to_be64(u.dw[TS_FPROFFSET]);
+#ifdef CONFIG_VSX
+ tmp[1] = cpu_to_be64(u.dw[TS_VSRLOWOFFSET]);
+#endif
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_part_table() - add a partition table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: partition table value
+ */
+static inline int kvmppc_gse_put_part_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_part_table val)
+{
+ __be64 tmp[3];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.ea_bits);
+ tmp[2] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PARTITION_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_proc_table() - add a process table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: process table value
+ */
+static inline int kvmppc_gse_put_proc_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_proc_table val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PROCESS_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_buff_info() - adds a GSB description guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: guest state buffer description value
+ */
+static inline int kvmppc_gse_put_buff_info(struct kvmppc_gs_buff *gsb, u16 iden,
+ struct kvmppc_gs_buff_info val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.size);
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+
+/**
+ * kvmppc_gse_get_be32() - return the data of a be32 element
+ * @gse: guest state element
+ */
+static inline __be32 kvmppc_gse_get_be32(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be32)))
+ return 0;
+ return *(__be32 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u32() - return the data of a be32 element in host endianness
+ * @gse: guest state element
+ */
+static inline u32 kvmppc_gse_get_u32(const struct kvmppc_gs_elem *gse)
+{
+ return be32_to_cpu(kvmppc_gse_get_be32(gse));
+}
+
+/**
+ * kvmppc_gse_get_be64() - return the data of a be64 element
+ * @gse: guest state element
+ */
+static inline __be64 kvmppc_gse_get_be64(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be64)))
+ return 0;
+ return *(__be64 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u64() - return the data of a be64 element in host endianness
+ * @gse: guest state element
+ */
+static inline u64 kvmppc_gse_get_u64(const struct kvmppc_gs_elem *gse)
+{
+ return be64_to_cpu(kvmppc_gse_get_be64(gse));
+}
+
+/**
+ * kvmppc_gse_get_vector128() - return the data of a vector element
+ * @gse: guest state element
+ */
+static inline void kvmppc_gse_get_vector128(const struct kvmppc_gs_elem *gse,
+ vector128 *v)
+{
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u = { 0 };
+ __be64 *src;
+
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__vector128))) {
+ *v = u.v;
+ return;
+ }
+
+ src = (__be64 *)kvmppc_gse_data(gse);
+ u.dw[TS_FPROFFSET] = be64_to_cpu(src[0]);
+#ifdef CONFIG_VSX
+ u.dw[TS_VSRLOWOFFSET] = be64_to_cpu(src[1]);
+#endif
+ *v = u.v;
+}
+
+/**************************************************************************
+ * Guest State Bitmap
+ **************************************************************************/
+
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev);
+
+/**
+ * kvmppc_gsbm_zero - zero the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_zero(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_zero(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+}
+
+/**
+ * kvmppc_gsbm_fill - fill the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_fill(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_fill(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+ clear_bit(0, gsbm->bitmap);
+}
+
+/**
+ * kvmppc_gsbm_for_each - iterate the present guest state IDs
+ * @gsbm: guest state buffer bitmap
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsbm_for_each(gsbm, iden) \
+ for (iden = kvmppc_gsbm_next(gsbm, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(gsbm, iden))
+
+/**************************************************************************
+ * Guest State Parser
+ **************************************************************************/
+
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse);
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp,
+ u16 iden);
+
+/**
+ * kvmppc_gsp_for_each - iterate the <guest state IDs, guest state element>
+ * pairs
+ * @gsp: guest state parser
+ * @iden: current guest state ID
+ * @gse: guest state element
+ */
+#define kvmppc_gsp_for_each(gsp, iden, gse) \
+ for (iden = kvmppc_gsbm_next(&(gsp)->iterator, 0), \
+ gse = kvmppc_gsp_lookup((gsp), iden); \
+ iden != 0; iden = kvmppc_gsbm_next(&(gsp)->iterator, iden), \
+ gse = kvmppc_gsp_lookup((gsp), iden))
+
+/**************************************************************************
+ * Guest State Message
+ **************************************************************************/
+
+/**
+ * kvmppc_gsm_for_each - iterate the guest state IDs included in a guest state
+ * message
+ * @gsm: guest state message
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsm_for_each(gsm, iden) \
+ for (iden = kvmppc_gsbm_next(&gsm->bitmap, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(&gsm->bitmap, iden))
+
+int kvmppc_gsm_init(struct kvmppc_gs_msg *mgs, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags);
+
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags);
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm);
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm);
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb);
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gsm_include - indicate a guest state ID should be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline void kvmppc_gsm_include(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ kvmppc_gsbm_set(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_includes - check if a guest state ID will be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline bool kvmppc_gsm_includes(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ return kvmppc_gsbm_test(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_include_all - indicate all guest state IDs should be included
+ * when serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_include_all(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_fill(&gsm->bitmap);
+}
+
+/**
+ * kvmppc_gsm_reset - clear the guest state IDs that should be included when
+ * serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_zero(&gsm->bitmap);
+}
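
Putting the include/includes/reset helpers together, a minimal sketch
(assumes gsm was obtained from kvmppc_gsm_new() declared above; KVMPPC_GSID_CR
and KVMPPC_GSID_XER are the IDs used by the accessors later in this patch):

    static void example_select_ids(struct kvmppc_gs_msg *gsm)
    {
            kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);
            kvmppc_gsm_include(gsm, KVMPPC_GSID_XER);

            WARN_ON(!kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR));

            kvmppc_gsm_reset(gsm);  /* back to an empty set */
    }
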
+
+/**
+ * kvmppc_gsb_receive_data - flexibly update values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Requests updated values for the guest state IDs included in the guest
+ * state message, then uses the message's refresh op to deserialize the
+ * returned guest state buffer.
+ */
+static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsb_recv(gsb, gsm->flags);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsm_refresh_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_receive_datum - receive the value for a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_receive_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
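
For example, fetching one register value end to end (a sketch; gsb and gsm are
assumed to have been created earlier with kvmppc_gsb_new()/kvmppc_gsm_new()
and sized large enough for the element):

    static int example_fetch_cr(struct kvmppc_gs_buff *gsb,
                                struct kvmppc_gs_msg *gsm)
    {
            int rc;

            /* serialize the request, issue the receive, then deserialize */
            rc = kvmppc_gsb_receive_datum(gsb, gsm, KVMPPC_GSID_CR);
            if (rc < 0)
                    return rc;
            /* gsm's refresh_info op has now updated its backing data */
            return 0;
    }
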
+
+/**
+ * kvmppc_gsb_send_data - flexibly send values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Sends the guest state values included in the guest state message.
+ */
+static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ rc = kvmppc_gsb_send(gsb, gsm->flags);
+
+ return rc;
+}
+
+/**
+ * kvmppc_gsb_send_datum - send the value for a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
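
The send side is symmetric; e.g. pushing one updated value (sketch, same
assumptions as above):

    static int example_push_cr(struct kvmppc_gs_buff *gsb,
                               struct kvmppc_gs_msg *gsm)
    {
            /* serialize KVMPPC_GSID_CR and send it to the hypervisor */
            return kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_CR);
    }
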
+
+#endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c099780385..ddb99e9829 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -100,6 +100,18 @@
#define H_COP_HW -74
#define H_STATE -75
#define H_IN_USE -77
+
+#define H_INVALID_ELEMENT_ID -79
+#define H_INVALID_ELEMENT_SIZE -80
+#define H_INVALID_ELEMENT_VALUE -81
+#define H_INPUT_BUFFER_NOT_DEFINED -82
+#define H_INPUT_BUFFER_TOO_SMALL -83
+#define H_OUTPUT_BUFFER_NOT_DEFINED -84
+#define H_OUTPUT_BUFFER_TOO_SMALL -85
+#define H_PARTITION_PAGE_TABLE_NOT_DEFINED -86
+#define H_GUEST_VCPU_STATE_NOT_HV_OWNED -87
+
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -381,6 +393,15 @@
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_COPY_MEMORY 0x484
+#define H_GUEST_DELETE 0x488
/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1
@@ -467,6 +488,15 @@
#define H_RPTI_PAGE_1G 0x08
#define H_RPTI_PAGE_ALL (-1UL)
+/* Flags for H_GUEST_{S,G}_STATE */
+#define H_GUEST_FLAGS_WIDE (1UL<<(63-0))
+
+/* Flag values used for H_GUEST_{S,G}ET_CAPABILITIES */
+#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0))
+#define H_GUEST_CAP_POWER9 (1UL<<(63-1))
+#define H_GUEST_CAP_POWER10 (1UL<<(63-2))
+#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
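
The (1UL << (63 - n)) pattern above encodes PAPR's MSB-0 bit numbering: bit 0
is the most significant bit of the 64-bit flag word. A hedged sketch of probing
capabilities through the existing plpar_hcall() wrapper; the interpretation of
retbuf[0] is an assumption for illustration, not a statement of the PAPR ABI:

    #include <asm/hvcall.h>

    static long example_probe_guest_caps(void)
    {
            unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
            long rc;

            rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, 0UL /* flags */);
            /* assumed: capability bits returned in retbuf[0] */
            if (rc == H_SUCCESS && (retbuf[0] & H_GUEST_CAP_POWER10))
                    pr_info("L0 supports POWER10-compatible guests\n");
            return rc;
    }
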
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 699a88584a..a656635df3 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -74,14 +74,14 @@ struct imc_events {
* The following is the data structure to hold trace imc data.
*/
struct trace_imc_data {
- u64 tb1;
- u64 ip;
- u64 val;
- u64 cpmc1;
- u64 cpmc2;
- u64 cpmc3;
- u64 cpmc4;
- u64 tb2;
+ __be64 tb1;
+ __be64 ip;
+ __be64 val;
+ __be64 cpmc1;
+ __be64 cpmc2;
+ __be64 cpmc3;
+ __be64 cpmc4;
+ __be64 tb2;
};
/* Event attribute array index */
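
With the fields now typed __be64, readers must byte-swap explicitly; a minimal
sketch:

    static void example_read_trace(const struct trace_imc_data *rec,
                                   u64 *tb, u64 *ip)
    {
            /* the IMC hardware writes these records big-endian */
            *tb = be64_to_cpu(rec->tb1);
            *ip = be64_to_cpu(rec->ip);
    }
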
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 0732b743e0..5220274a62 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -950,7 +950,7 @@ extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
* almost all conceivable cases a device driver should not be using
* this function
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(const volatile void * address)
{
WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address));
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
index b8b0be8f1a..c6d3078bd8 100644
--- a/arch/powerpc/include/asm/irq_work.h
+++ b/arch/powerpc/include/asm/irq_work.h
@@ -6,6 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
{
return true;
}
-extern void arch_irq_work_raise(void);
#endif /* _ASM_POWERPC_IRQ_WORK_H */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 93ce3ec253..2f2a86ed22 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
@@ -32,7 +32,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index a1ddba01e7..e1b43aa121 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -99,10 +99,14 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co
void kexec_copy_flush(struct kimage *image);
-#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
+#if defined(CONFIG_CRASH_DUMP)
+bool is_kdump_kernel(void);
+#define is_kdump_kernel is_kdump_kernel
+#if defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
-#endif
+#endif /* CONFIG_PPC_RTAS */
+#endif /* CONFIG_CRASH_DUMP */
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index c8e4b4fd4e..4525a9c682 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -84,8 +84,6 @@ struct arch_optimized_insn {
kprobe_opcode_t *insn;
};
-extern int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bbf5e2c5fe..4f527d09c9 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/guest-state-buffer.h>
struct kvmppc_bat {
u64 raw;
@@ -191,14 +192,14 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid);
+ unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid);
+ u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
bool writing, unsigned long gpa,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
unsigned long gpa,
struct kvm_memory_slot *memslot,
@@ -207,7 +208,7 @@ extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -295,12 +296,13 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
@@ -316,6 +318,69 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+extern struct static_key_false __kvmhv_is_nestedv2;
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return static_branch_unlikely(&__kvmhv_is_nestedv2);
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return !static_branch_likely(&__kvmhv_is_nestedv2);
+}
+
+#else
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+#endif
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_cached_reload(vcpu, iden);
+ return 0;
+}
+
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -335,60 +400,72 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.regs.gpr[num] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.regs.ccr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.xer = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.ctr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.link = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.nip = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
return vcpu->arch.regs.nip;
}
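
With these hooks in place, callers keep using the accessors as before; under
nestedv2 a getter transparently reloads stale state from the L0 and a setter
marks it for the next flush. For instance, stepping the guest past an emulated
instruction:

    static void example_skip_insn(struct kvm_vcpu *vcpu)
    {
            /* get may trigger a cached reload; set marks KVMPPC_GSID_NIA dirty */
            kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
    }
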
@@ -403,10 +480,141 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
+ return vcpu->arch.fp.fpscr;
+}
+
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.fp.fpscr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
+}
+
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][j];
+}
+
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
+ u64 val)
+{
+ vcpu->arch.fp.fpr[i][j] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
+ *v = vcpu->arch.vr.vr[i];
+}
+
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
+ vector128 *val)
+{
+ vcpu->arch.vr.vr[i] = *val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
+}
+
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
+ return vcpu->arch.vr.vscr.u[3];
+}
+
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.vr.vscr.u[3] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
+}
+#endif
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+	vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
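
For reference, KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR) expands to
roughly this pair (whitespace tidied):

    static inline void kvmppc_set_tar(struct kvm_vcpu *vcpu, u64 val)
    {
            vcpu->arch.tar = val;
            kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_TAR);
    }

    static inline u64 kvmppc_get_tar(struct kvm_vcpu *vcpu)
    {
            WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TAR) < 0);
            return vcpu->arch.tar;
    }
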
+
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.vcore->reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.vcore->reg; \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+
+
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+
+static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
+ return vcpu->arch.dec_expires;
+}
+
+static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.dec_expires = val;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
+}
+
/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+ return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}
static inline bool is_kvmppc_resume_guest(int r)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d49065af08..2477021bff 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -624,7 +624,7 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
struct rmap_nested **n_rmap);
@@ -677,6 +677,12 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift);
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 0c3401b2e1..7c3291aa89 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.regs.nip;
}
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
#ifdef CONFIG_BOOKE
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 14ee0dece8..8799b37be2 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
#include <asm/mce.h>
+#include <asm/guest-state-buffer.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -276,7 +277,7 @@ struct kvm_resize_hpt;
#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
struct kvm_arch {
- unsigned int lpid;
+ u64 lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
unsigned int emul_smt_mode; /* emualted SMT mode, on P9 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -509,6 +510,23 @@ union xive_tma_w01 {
__be64 w01;
};
+/* Nestedv2 H_GUEST_RUN_VCPU configuration */
+struct kvmhv_nestedv2_config {
+ struct kvmppc_gs_buff_info vcpu_run_output_cfg;
+ struct kvmppc_gs_buff_info vcpu_run_input_cfg;
+ u64 vcpu_run_output_size;
+};
+
+/* Nestedv2 L1<->L0 communication state */
+struct kvmhv_nestedv2_io {
+ struct kvmhv_nestedv2_config cfg;
+ struct kvmppc_gs_buff *vcpu_run_output;
+ struct kvmppc_gs_buff *vcpu_run_input;
+ struct kvmppc_gs_msg *vcpu_message;
+ struct kvmppc_gs_msg *vcore_message;
+ struct kvmppc_gs_bitmap valids;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -829,6 +847,8 @@ struct kvm_vcpu_arch {
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
+	/* For nested APIv2 guests */
+ struct kvmhv_nestedv2_io nestedv2_io;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b4da8514af..3281215097 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -615,6 +615,42 @@ static inline bool kvmhv_on_pseries(void)
{
return false;
}
+
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -927,79 +963,85 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
#endif
}
-#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
return mfspr(bookehv_spr); \
} \
-#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
{ \
mtspr(bookehv_spr, val); \
} \
-#define SHARED_WRAPPER_GET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ if (iden) \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
if (kvmppc_shared_big_endian(vcpu)) \
- return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \
else \
- return le##size##_to_cpu(vcpu->arch.shared->reg); \
+ return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \
} \
-#define SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
if (kvmppc_shared_big_endian(vcpu)) \
- vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \
else \
- vcpu->arch.shared->reg = cpu_to_le##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \
+ \
+ if (iden) \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
} \
-#define SHARED_WRAPPER(reg, size) \
- SHARED_WRAPPER_GET(reg, size) \
- SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
-#define SPRNG_WRAPPER(reg, bookehv_spr) \
- SPRNG_WRAPPER_GET(reg, bookehv_spr) \
- SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
#ifdef CONFIG_KVM_BOOKE_HV
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SPRNG_WRAPPER(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
#else
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SHARED_WRAPPER(reg, size) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
#endif
-SHARED_WRAPPER(critical, 64)
-SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
-SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
-SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
-SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
-SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
-SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
-SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
-SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
-SHARED_WRAPPER_GET(msr, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
if (kvmppc_shared_big_endian(vcpu))
vcpu->arch.shared->msr = cpu_to_be64(val);
else
vcpu->arch.shared->msr = cpu_to_le64(val);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
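
Only the getter is macro-generated for msr; kvmppc_set_msr_fast() is written
out by hand but mirrors what the SET macro would emit. For a generated
accessor, KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(dsisr, 32, KVMPPC_GSID_DSISR)
expands to approximately (the constant-true `if (iden)` folded away):

    static inline u32 kvmppc_get_dsisr(struct kvm_vcpu *vcpu)
    {
            WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DSISR) < 0);
            if (kvmppc_shared_big_endian(vcpu))
                    return be32_to_cpu((__be32 __force)vcpu->arch.shared->dsisr);
            else
                    return le32_to_cpu((__le32 __force)vcpu->arch.shared->dsisr);
    }
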
-SHARED_WRAPPER(dsisr, 32)
-SHARED_WRAPPER(int_pending, 32)
-SHARED_WRAPPER(sprg4, 64)
-SHARED_WRAPPER(sprg5, 64)
-SHARED_WRAPPER(sprg6, 64)
-SHARED_WRAPPER(sprg7, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 45492fb5bf..ec6ced6d7c 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -115,23 +115,23 @@ static __inline__ long local_xchg(local_t *l, long n)
}
/**
- * local_add_unless - add unless the number is a given value
+ * local_add_unless - add unless the number is already a given value
* @l: pointer of type local_t
 * @a: the amount to add to @l...
 * @u: ...unless @l is equal to @u.
*
- * Atomically adds @a to @l, so long as it was not @u.
- * Returns non-zero if @l was not @u, and zero otherwise.
+ * Atomically adds @a to @l, if @l was not already @u.
+ * Returns true if the addition was done.
*/
-static __inline__ int local_add_unless(local_t *l, long a, long u)
+static __inline__ bool local_add_unless(local_t *l, long a, long u)
{
unsigned long flags;
- int ret = 0;
+ bool ret = false;
powerpc_local_irq_pmu_save(flags);
if (l->v != u) {
l->v += a;
- ret = 1;
+ ret = true;
}
powerpc_local_irq_pmu_restore(flags);
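
A typical caller uses this as a capped per-CPU counter; a minimal sketch
(LOCAL_INIT() is provided by this header family):

    static local_t example_events = LOCAL_INIT(0);

    static bool example_count_event(long cap)
    {
            /* increments unless the counter has already reached cap */
            return local_add_unless(&example_events, 1, cap);
    }
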
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 4f6e7d7ee3..d31a5ec155 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -10,7 +10,7 @@
#include <linux/export.h>
struct pt_regs;
-struct pci_bus;
+struct pci_bus;
struct device_node;
struct iommu_table;
struct rtc_time;
@@ -78,8 +78,8 @@ struct machdep_calls {
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
- ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
- ssize_t (*nvram_size)(void);
+ ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_size)(void);
void (*nvram_sync)(void);
/* Exception handlers */
@@ -102,12 +102,11 @@ struct machdep_calls {
*/
long (*feature_call)(unsigned int feature, ...);
- /* Get legacy PCI/IDE interrupt mapping */
+ /* Get legacy PCI/IDE interrupt mapping */
int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
-
+
/* Get access protection for /dev/mem */
- pgprot_t (*phys_mem_access_prot)(struct file *file,
- unsigned long pfn,
+ pgprot_t (*phys_mem_access_prot)(unsigned long pfn,
unsigned long size,
pgprot_t vma_prot);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 52cc25864a..d8b7e246a3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -412,5 +412,9 @@ extern void *abatron_pteptrs[2];
#include <asm/nohash/mmu.h>
#endif
+#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
+#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 4c6c6dbd18..da827d2d08 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -42,14 +42,6 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_FA_DUMP
-#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
-#endif
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-extern int create_section_mapping(unsigned long start, unsigned long end,
- int nid, pgprot_t prot);
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 0e93a4728c..141d82e249 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -188,7 +188,6 @@ typedef struct {
} mm_context_t;
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
-#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
/* Page size definitions, common between 32 and 64-bit
*
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index f99c53a5f1..9164a9e41b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -9,10 +9,6 @@
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#ifdef CONFIG_44x
-extern int icache_44x_need_flush;
-#endif
-
#endif /* __ASSEMBLY__ */
#define PTE_INDEX_SIZE PTE_SHIFT
@@ -55,26 +51,22 @@ extern int icache_44x_need_flush;
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
- (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-#ifndef __ASSEMBLY__
-
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-void unmap_kernel_page(unsigned long va);
-
-#endif /* !__ASSEMBLY__ */
-
-
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
-#include <asm/fixmap.h>
+
+#define FIXADDR_SIZE 0
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
+#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
@@ -151,7 +143,7 @@ void unmap_kernel_page(unsigned long va);
* The mask covered by the RPN must be a ULL on 32-bit platforms with
* 64-bit PTEs.
*/
-#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
@@ -159,48 +151,8 @@ void unmap_kernel_page(unsigned long va);
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
#ifndef __ASSEMBLY__
-#define pte_clear(mm, addr, ptep) \
- do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
-
-#ifndef pte_mkwrite_novma
-static inline pte_t pte_mkwrite_novma(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_RW);
-}
-#endif
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-#ifndef pte_wrprotect
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-#endif
-
-#ifndef pte_mkexec
-static inline pte_t pte_mkexec(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-#endif
-
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
@@ -210,141 +162,6 @@ static inline void pmd_clear(pmd_t *pmdp)
}
/*
- * PTE updates. This function is called whenever an existing
- * valid PTE is updated. This does -not- include set_pte_at()
- * which nowadays only sets a new PTE.
- *
- * Depending on the type of MMU, we may need to use atomic updates
- * and the PTE may be either 32 or 64 bit wide. In the later case,
- * when using atomic updates, only the low part of the PTE is
- * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
- *
- * On the 8xx, the page tables are a bit special. For 16k pages, we have
- * 4 identical entries. For 512k pages, we have 128 entries as if it was
- * 4k pages, but they are flagged as 512k pages for the hardware.
- * For other page sizes, we have a single entry in the table.
- */
-#ifdef CONFIG_PPC_8xx
-static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
-static int hugepd_ok(hugepd_t hpd);
-
-static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
-{
- if (!huge)
- return PAGE_SIZE / SZ_4K;
- else if (hugepd_ok(*((hugepd_t *)pmd)))
- return 1;
- else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
- return SZ_16K / SZ_4K;
- else
- return SZ_512K / SZ_4K;
-}
-
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
- unsigned long clr, unsigned long set, int huge)
-{
- pte_basic_t *entry = (pte_basic_t *)p;
- pte_basic_t old = pte_val(*p);
- pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
- int num, i;
- pmd_t *pmd = pmd_off(mm, addr);
-
- num = number_of_cells_per_pte(pmd, new, huge);
-
- for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
- *entry++ = new;
- if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
- *entry++ = new;
- *entry++ = new;
- *entry++ = new;
- }
- }
-
- return old;
-}
-
-#ifdef CONFIG_PPC_16K_PAGES
-#define ptep_get ptep_get
-static inline pte_t ptep_get(pte_t *ptep)
-{
- pte_basic_t val = READ_ONCE(ptep->pte);
- pte_t pte = {val, val, val, val};
-
- return pte;
-}
-#endif /* CONFIG_PPC_16K_PAGES */
-
-#else
-static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
- unsigned long clr, unsigned long set, int huge)
-{
- pte_basic_t old = pte_val(*p);
- pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
-
- *p = __pte(new);
-
-#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
-#endif
- return old;
-}
-#endif
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#ifndef ptep_set_wrprotect
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-#endif
-
-#ifndef __ptep_set_access_flags
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
- pte_t *ptep, pte_t entry,
- unsigned long address,
- int psize)
-{
- unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- int huge = psize > mmu_virtual_psize ? 1 : 0;
-
- pte_update(vma->vm_mm, address, ptep, 0, set, huge);
-
- flush_tlb_page(vma, address);
-}
-#endif
-
-static inline int pte_young(pte_t pte)
-{
- return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-/*
* Note that on Book E processors, the pmd contains the kernel virtual
* (lowmem) address of the pte page. The physical address is less useful
* because everything runs with translation enabled (even the TLB miss
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 0b4e5f8ce3..d759cfd747 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -42,10 +42,10 @@
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_READ 0x010 /* software: read permission */
#define _PAGE_SPECIAL 0x020 /* software: Special page */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
+#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
@@ -55,11 +55,6 @@
/* cache related flags non existing on 40x */
#define _PAGE_COHERENT 0
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK _PMD_PRESENT
#define _PMD_BAD 0x802
@@ -72,14 +67,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index b7ed13cee1..8518137252 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -63,12 +63,12 @@
*/
#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
-#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_WRITE 0x00000002 /* S: Write permission */
#define _PAGE_EXEC 0x00000004 /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_READ 0x00000008 /* S: Read permission */
#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
-#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ACCESSED 0x00000040 /* S: Page referenced */
#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
#define _PAGE_GUARDED 0x00000100 /* H: G bit */
#define _PAGE_COHERENT 0x00000200 /* H: M bit */
@@ -78,11 +78,6 @@
/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -105,14 +100,7 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 16451df5dd..653a342d3b 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-85xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -17,9 +17,9 @@
*/
/* Definitions for FSL Book-E Cores */
-#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
-#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
-#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_READ 0x00001 /* H: Read permission (SR) */
+#define _PAGE_PRESENT 0x00002 /* S: PTE contains a translation */
+#define _PAGE_WRITE 0x00004 /* S: Write permission (SW) */
#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
#define _PAGE_EXEC 0x00010 /* H: SX permission */
#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
@@ -31,11 +31,6 @@
#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_SPECIAL 0x00800 /* S: Special page */
-#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
-
/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE 0
@@ -61,14 +56,7 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index e6fe1d5731..137dc3c84e 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -48,6 +48,11 @@
#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+#define _PAGE_NAX (_PAGE_NA | _PAGE_EXEC)
+#define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC)
+#define _PAGE_RW 0
+#define _PAGE_RWX _PAGE_EXEC
+
/* cache related flags non existing on 8xx */
#define _PAGE_COHERENT 0
#define _PAGE_WRITETHRU 0
@@ -77,14 +82,7 @@
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE (_PAGE_BASE_NC)
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
-#define PAGE_SHARED __pgprot(_PAGE_BASE)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+#include <asm/pgtable-masks.h>
#ifndef __ASSEMBLY__
static inline pte_t pte_wrprotect(pte_t pte)
@@ -115,27 +113,6 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)
#define pte_mkwrite_novma pte_mkwrite_novma
-static inline bool pte_user(pte_t pte)
-{
- return !(pte_val(pte) & _PAGE_SH);
-}
-
-#define pte_user pte_user
-
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_SH);
-}
-
-#define pte_mkprivileged pte_mkprivileged
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_SH);
-}
-
-#define pte_mkuser pte_mkuser
-
static inline pte_t pte_mkhuge(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
@@ -187,6 +164,63 @@ static inline unsigned long pte_leaf_size(pte_t pte)
#define pte_leaf_size pte_leaf_size
+/*
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
+ */
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+static int hugepd_ok(hugepd_t hpd);
+
+static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
+{
+ if (!huge)
+ return PAGE_SIZE / SZ_4K;
+ else if (hugepd_ok(*((hugepd_t *)pmd)))
+ return 1;
+ else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
+ return SZ_16K / SZ_4K;
+ else
+ return SZ_512K / SZ_4K;
+}
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t *entry = (pte_basic_t *)p;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_off(mm, addr);
+
+ num = number_of_cells_per_pte(pmd, new, huge);
+
+ for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
+ *entry++ = new;
+ if (IS_ENABLED(CONFIG_PPC_16K_PAGES) && num != 1) {
+ *entry++ = new;
+ *entry++ = new;
+ *entry++ = new;
+ }
+ }
+
+ return old;
+}
+
+#define pte_update pte_update
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ pte_basic_t val = READ_ONCE(ptep->pte);
+ pte_t pte = {val, val, val, val};
+
+ return pte;
+}
+#endif /* CONFIG_PPC_16K_PAGES */
+
#endif
#endif /* __KERNEL__ */
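
Concretely: with 16k pages, number_of_cells_per_pte() returns SZ_16K / SZ_4K
= 4, so one logical PTE update above stores the new value into four
consecutive 4k hardware entries, and ptep_get() rebuilds a pte_t from one of
those identical cells. A sketch of a caller relying on that (this is
essentially what the generic pte_clear() does):

    static inline void example_clear_pte(struct mm_struct *mm,
                                         unsigned long addr, pte_t *ptep)
    {
            /* rewrites every hardware cell backing this logical PTE */
            pte_update(mm, addr, ptep, ~0UL, 0, 0);
    }
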
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index eb6891e34c..2202c78730 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -57,6 +57,7 @@
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
/*
* Defines the address of the vmemap area, in its own region on
@@ -74,37 +75,11 @@
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
-/*
- * _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes.
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
-
#define H_PAGE_4K_PFN 0
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
-static inline pte_t pte_mkwrite_novma(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_RW);
-}
-
-static inline pte_t pte_mkdirty(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
-}
-
-static inline pte_t pte_mkyoung(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
-}
-
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_RW);
-}
-
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
@@ -170,107 +145,20 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
*p4dp = __p4d(val);
}
-/* Atomic PTE updates */
-static inline unsigned long pte_update(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, unsigned long clr,
- unsigned long set,
- int huge)
-{
- unsigned long old = pte_val(*ptep);
- *ptep = __pte((old & ~clr) | set);
-
- /* huge pages use the old page table lock */
- if (!huge)
- assert_pte_locked(mm, addr);
-
- return old;
-}
-
-static inline int pte_young(pte_t pte)
-{
- return pte_val(pte) & _PAGE_ACCESSED;
-}
-
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if (!pte_young(*ptep))
- return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
-
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
-}
-
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
- int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
- __ptep); \
+ int __young = ptep_test_and_clear_young(__vma, __address, __ptep);\
__young; \
})
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
- return __pte(old);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t * ptep)
-{
- pte_update(mm, addr, ptep, ~0UL, 0, 0);
-}
-
-
-/* Set the dirty and/or accessed bits atomically in a linux PTE */
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
- pte_t *ptep, pte_t entry,
- unsigned long address,
- int psize)
-{
- unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-
- unsigned long old = pte_val(*ptep);
- *ptep = __pte(old | bits);
-
- flush_tlb_page(vma, address);
-}
-
-#define pte_ERROR(e) \
- pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
@@ -310,8 +198,6 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
/* We borrow MSB 56 (LSB 7) to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x80
-int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
-void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index c721478c59..427db14292 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -2,12 +2,23 @@
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H
+#ifndef __ASSEMBLY__
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge);
+#endif
+
#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
@@ -18,16 +29,136 @@
#ifndef __ASSEMBLY__
+extern int icache_44x_need_flush;
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the later case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef pte_update
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ if (new == old)
+ return old;
+
+ *p = __pte(new);
+
+ if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
+ icache_44x_need_flush = 1;
+
+ /* huge pages use the old page table lock */
+ if (!huge)
+ assert_pte_locked(mm, addr);
+
+ return old;
+}
+#endif
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+
+#ifndef ptep_set_wrprotect
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+}
+#endif
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
+}
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
+#ifndef __ptep_set_access_flags
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+ pte_t *ptep, pte_t entry,
+ unsigned long address,
+ int psize)
+{
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, 0, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#endif
+
/* Generic accessors to PTE bits */
+#ifndef pte_mkwrite_novma
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ /*
+ * write implies read, hence set both
+ */
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+#endif
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+#ifndef pte_wrprotect
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
+}
+#endif
+
+#ifndef pte_mkexec
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+#endif
+
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
- return pte_val(pte) & _PAGE_RW;
+ return pte_val(pte) & _PAGE_WRITE;
}
#endif
-#ifndef pte_read
-static inline int pte_read(pte_t pte) { return 1; }
-#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
@@ -35,23 +166,6 @@ static inline bool pte_hashpte(pte_t pte) { return false; }
static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/linux/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
-static inline int pte_protnone(pte_t pte)
-{
- return pte_present(pte) && !pte_user(pte);
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
- return pte_protnone(pmd_pte(pmd));
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & _PAGE_PRESENT;
@@ -62,15 +176,20 @@ static inline bool pte_hw_valid(pte_t pte)
return pte_val(pte) & _PAGE_PRESENT;
}
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
/*
- * Don't just check for any non zero bits in __PAGE_USER, since for book3e
+ * Don't just check for any non-zero bits in _PAGE_READ, since for book3e
* and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
- * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
+ * _PAGE_READ. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/
-#ifndef pte_user
-static inline bool pte_user(pte_t pte)
+#ifndef pte_read
+static inline bool pte_read(pte_t pte)
{
- return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+ return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif
@@ -82,10 +201,10 @@ static inline bool pte_user(pte_t pte)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
/*
- * A read-only access is controlled by _PAGE_USER bit.
- * We have _PAGE_READ set for WRITE and EXECUTE
+	 * A read-only access is controlled by the _PAGE_READ bit.
+ * We have _PAGE_READ set for WRITE
*/
- if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ if (!pte_present(pte) || !pte_read(pte))
return false;
if (write && !pte_write(pte))
@@ -132,20 +251,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif
-#ifndef pte_mkprivileged
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte(pte_val(pte) & ~_PAGE_USER);
-}
-#endif
-
-#ifndef pte_mkuser
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_USER);
-}
-#endif
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
@@ -207,11 +312,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mb();
}
-
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, pte_t entry, int dirty);
-
/*
* Macro to mark a page protection value as "uncacheable".
*/
@@ -240,11 +340,6 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#define pgprot_writecombine pgprot_noncached_wc
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
@@ -269,20 +364,8 @@ static inline int pud_huge(pud_t pud)
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- */
-#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, unsigned int nr);
-#else
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep, unsigned int nr) {}
-#endif
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* __ASSEMBLY__ */
#endif
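
Not part of the patch: a minimal sketch, assuming only the pte_update() contract established above (clr = bits to drop, set = bits to add, huge = 0 for normal pages), of how a new accessor would compose it. The helper name is hypothetical:

/*
 * Hypothetical helper, for illustration only: clear the dirty bit of an
 * existing PTE and report whether it was set, mirroring how
 * ptep_test_and_clear_young() uses pte_update() above.
 */
static inline int example_test_and_clear_dirty(struct mm_struct *mm,
					       unsigned long addr, pte_t *ptep)
{
	pte_basic_t old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);

	return (old & _PAGE_DIRTY) != 0;
}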
diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index d8924cbd61..f516f0b5b7 100644
--- a/arch/powerpc/include/asm/nohash/pte-e500.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -48,13 +48,20 @@
/* "Higher level" linux bit combinations */
#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
-#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_READ (_PAGE_BAP_SR | _PAGE_BAP_UR) /* User read permission */
+#define _PAGE_WRITE (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+
#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX)
-#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
-#define _PAGE_PRIVILEGED (_PAGE_BAP_SR)
+
+#define _PAGE_NA 0
+#define _PAGE_NAX _PAGE_BAP_UX
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_BAP_UX)
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_BAP_UX)
#define _PAGE_SPECIAL _PAGE_SW0
@@ -89,36 +96,12 @@
#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
-/* Permission masks used to generate the __P and __S table */
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
+#include <asm/pgtable-masks.h>
#ifndef __ASSEMBLY__
-static inline pte_t pte_mkprivileged(pte_t pte)
-{
- return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
-}
-
-#define pte_mkprivileged pte_mkprivileged
-
-static inline pte_t pte_mkuser(pte_t pte)
-{
- return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
-}
-
-#define pte_mkuser pte_mkuser
-
static inline pte_t pte_mkexec(pte_t pte)
{
- if (pte_val(pte) & _PAGE_BAP_UR)
- return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
- else
- return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
}
#define pte_mkexec pte_mkexec
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a9b31cc258..b66b0c615f 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -227,7 +227,7 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
uint64_t data);
int64_t opal_pci_poll2(uint64_t id, uint64_t data);
-int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_get_xirr(__be32 *out_xirr, bool just_poll);
int64_t opal_int_set_cppr(uint8_t cppr);
int64_t opal_int_eoi(uint32_t xirr);
int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index e08513d731..ac4279208d 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -71,6 +71,11 @@ static inline void yield_to_any(void)
{
plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
+
+static inline bool is_vcpu_idle(int vcpu)
+{
+ return lppaca_of(vcpu).idle;
+}
#else
static inline bool is_shared_processor(void)
{
@@ -100,6 +105,10 @@ static inline void prod_cpu(int cpu)
___bad_prod_cpu(); /* This would be a bug */
}
+static inline bool is_vcpu_idle(int vcpu)
+{
+ return false;
+}
#endif
#define vcpu_is_preempted vcpu_is_preempted
@@ -121,9 +130,23 @@ static inline bool vcpu_is_preempted(int cpu)
if (!is_shared_processor())
return false;
+ /*
+ * If the hypervisor has dispatched the target CPU on a physical
+ * processor, then the target CPU is definitely not preempted.
+ */
+ if (!(yield_count_of(cpu) & 1))
+ return false;
+
+ /*
+	 * If the target CPU has yielded to the hypervisor but the OS has
+	 * not requested idle, then the target CPU is definitely preempted.
+ */
+ if (!is_vcpu_idle(cpu))
+ return true;
+
#ifdef CONFIG_PPC_SPLPAR
if (!is_kvm_guest()) {
- int first_cpu;
+ int first_cpu, i;
/*
* The result of vcpu_is_preempted() is used in a
@@ -149,11 +172,29 @@ static inline bool vcpu_is_preempted(int cpu)
*/
if (cpu_first_thread_sibling(cpu) == first_cpu)
return false;
+
+ /*
+ * If any of the threads of the target CPU's core are not
+		 * preempted or ceded, then consider the target CPU to be
+ * non-preempted.
+ */
+ first_cpu = cpu_first_thread_sibling(cpu);
+ for (i = first_cpu; i < first_cpu + threads_per_core; i++) {
+ if (i == cpu)
+ continue;
+ if (!(yield_count_of(i) & 1))
+ return false;
+ if (!is_vcpu_idle(i))
+ return true;
+ }
}
#endif
- if (yield_count_of(cpu) & 1)
- return true;
+ /*
+	 * None of the threads in the target CPU's core are running, but
+	 * none of them were preempted either. Hence assume the target CPU
+	 * is not preempted.
+ */
return false;
}
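
For orientation, not from the patch: the two early exits added to vcpu_is_preempted() rest on the lppaca dispatch convention that the hypervisor bumps yield_count on every dispatch and preemption, so an odd value means the vCPU is currently off a physical processor. A sketch under that assumption, with a hypothetical name:

/* Sketch only: distills the two early-exit tests added above. */
static inline bool example_vcpu_off_processor(int cpu)
{
	/* Even yield count: currently dispatched, so not preempted. */
	if (!(yield_count_of(cpu) & 1))
		return false;

	/* Odd count and the OS did not request idle: truly preempted. */
	return !is_vcpu_idle(cpu);
}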
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index f5078a7dd8..46a9c4491e 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -105,9 +105,7 @@ extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
-struct file;
-extern pgprot_t pci_phys_mem_access_prot(struct file *file,
- unsigned long pfn,
+extern pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
unsigned long size,
pgprot_t prot);
diff --git a/arch/powerpc/include/asm/pgtable-masks.h b/arch/powerpc/include/asm/pgtable-masks.h
new file mode 100644
index 0000000000..6e8e2db26a
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-masks.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_PGTABLE_MASKS_H
+#define _ASM_POWERPC_PGTABLE_MASKS_H
+
+#ifndef _PAGE_NA
+#define _PAGE_NA 0
+#define _PAGE_NAX _PAGE_EXEC
+#define _PAGE_RO _PAGE_READ
+#define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC)
+#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+#define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
+#endif
+
+/* Permission flags for kernel mappings */
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO _PAGE_RO
+#define _PAGE_KERNEL_ROX _PAGE_ROX
+#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RWX (_PAGE_RWX | _PAGE_DIRTY)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
+#define PAGE_EXECONLY_X __pgprot(_PAGE_BASE | _PAGE_NAX)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RWX)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_ROX)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_ROX)
+
+#endif /* _ASM_POWERPC_PGTABLE_MASKS_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d0ee46de24..9224f23065 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -71,6 +71,12 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+static inline pgprot_t pgprot_nx(pgprot_t prot)
+{
+ return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
+}
+#define pgprot_nx pgprot_nx
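
Illustrative only: pgprot_nx() above derives a never-executable variant of an existing protection value. A one-function sketch; the helper name is hypothetical:

/* Sketch: a non-executable protection for a kernel data mapping. */
static pgprot_t example_data_prot(void)
{
	return pgprot_nx(PAGE_KERNEL);	/* PAGE_KERNEL with exec stripped */
}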
+
#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
@@ -110,6 +116,41 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { }
#endif
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
+pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot);
+
+struct file;
+static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ return __phys_mem_access_prot(pfn, size, vma_prot);
+}
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
+ (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
+ __update_mmu_cache(vma, address, ptep);
+}
+
/*
* When used, PTE_FRAG_NR is defined in subarch pgtable.h
* so we are sure it is included when arriving here.
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index fe3d0ea005..b3ee44a40c 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/delay.h>
#include <asm/hvcall.h>
#include <asm/paca.h>
@@ -343,6 +344,212 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long token;
+ long rc;
+
+ token = -1UL;
+ do {
+ rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
+ if (rc == H_SUCCESS)
+ *guest_id = retbuf[0];
+
+ if (rc == H_BUSY) {
+ token = retbuf[0];
+ cond_resched();
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ token = retbuf[0];
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);
+
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
+ if (rc == H_SUCCESS)
+ *trap = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ *capabilities = retbuf[0];
+
+ return rc;
+}
+
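A hedged usage sketch, not from the patch: one plausible sequencing of the wrappers above, which all retry H_BUSY internally. The zero flag values, the caller name, and the error handling are illustrative; the real consumers live in the nested-guest KVM code:

/* Hypothetical caller: create a guest with one vCPU, delete it on error. */
static long example_guest_lifecycle(void)
{
	unsigned long guest_id;
	long rc;

	rc = plpar_guest_create(0, &guest_id);
	if (rc != H_SUCCESS)
		return rc;

	rc = plpar_guest_create_vcpu(0, guest_id, 0);
	if (rc != H_SUCCESS)
		plpar_guest_delete(0, guest_id);

	return rc;
}
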
/*
* Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
*
@@ -355,7 +562,7 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
* error recovery of killing the process/guest will be eventually
* needed.
*/
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
long rc;
@@ -401,12 +608,68 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
return 0;
}
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
return 0;
}
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index d9fcff5750..2689e7139b 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
void *data);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
+ defined(CONFIG_PPC_POWERNV))
+extern void ppc_iommu_register_device(struct pci_controller *phb);
+extern void ppc_iommu_unregister_device(struct pci_controller *phb);
+#else
+static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
+static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
+#endif
+
+
/* From rtas_pci.h */
extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9db8b16567..ea8f91fbc6 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -397,6 +397,23 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
return 0;
}
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (start from 0)
+ *
+ * We support up to 8 arguments and assume they are sent in through the GPRs.
+ * This will fail for fp/vector arguments, but those aren't usually found in
+ * kernel code. This is expected to be called from kprobes or ftrace with regs.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, unsigned int n)
+{
+#define NR_REG_ARGUMENTS 8
+ if (n < NR_REG_ARGUMENTS)
+ return regs_get_register(regs, offsetof(struct pt_regs, gpr[3 + n]));
+ return 0;
+}
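
A brief hedged sketch, not part of the patch: reading the first GPR-passed argument from a kprobe pre-handler, the caller the kernel-doc above names. The handler name is a placeholder:

#include <linux/kprobes.h>

/* Placeholder handler: log argument 0 (GPR r3) of the probed function. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);

	pr_info("%s: arg0=%#lx\n", p->symbol_name, arg0);
	return 0;
}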
+
#endif /* __ASSEMBLY__ */
#ifndef __powerpc64__
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4ae4ab9090..ade5f094db 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -617,6 +617,8 @@
#endif
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
+#define SPRN_HID2_G2_LE 0x3F3 /* G2_LE HID2 Register */
+#define HID2_G2_LE_HBE (1<<18) /* High BAT Enable (G2_LE) */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index bf5dde1a41..15c5691dd2 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -14,7 +14,7 @@
#ifdef __KERNEL__
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index fb725ec779..de10437fd2 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -74,7 +74,7 @@ __pu_failed: \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm goto( \
"1: " op " %0,0(%1) # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@@ -83,7 +83,7 @@ __pu_failed: \
: label)
#else
#define __put_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm goto( \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@@ -97,7 +97,7 @@ __pu_failed: \
__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
- asm_volatile_goto( \
+ asm goto( \
"1: stw%X1 %0, %1\n" \
"2: stw%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
@@ -146,7 +146,7 @@ do { \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: "op" %0,0(%1) # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@@ -155,7 +155,7 @@ do { \
: label)
#else
#define __get_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: "op"%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@@ -169,7 +169,7 @@ do { \
__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: lwz%X1 %0, %1\n" \
"2: lwz%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
@@ -374,7 +374,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)
if (check_copy_size(from, n, true)) {
if (access_ok(to, n)) {
allow_write_to_user(to, n);
- n = copy_mc_generic((void *)to, from, n);
+ n = copy_mc_generic((void __force *)to, from, n);
prevent_write_to_user(to, n);
}
}
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 19e46fd623..7f63f1cdc6 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/font.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/of.h>
@@ -41,10 +42,6 @@ static unsigned char *logicalDisplayBase __force_data;
unsigned long disp_BAT[2] __initdata = {0, 0};
-#define cmapsz (16*256)
-
-static unsigned char vga_font[cmapsz];
-
static int boot_text_mapped __force_data;
extern void rmci_on(void);
@@ -407,7 +404,7 @@ static unsigned int expand_bits_16[4] = {
};
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+static void draw_byte_32(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -428,7 +425,7 @@ static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
}
}
-static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+static inline void draw_byte_16(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
@@ -446,7 +443,7 @@ static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
}
}
-static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+static inline void draw_byte_8(const unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
@@ -465,7 +462,8 @@ static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
static noinline void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
- unsigned char *font = &vga_font[((unsigned int)c) * 16];
+ unsigned int font_index = c * 16;
+ const unsigned char *font = font_sun_8x16.data + font_index;
int rb = dispDeviceRowBytes;
rmci_maybe_on();
@@ -583,349 +581,3 @@ void __init udbg_init_btext(void)
*/
udbg_putc = btext_drawchar;
}
-
-static unsigned char vga_font[cmapsz] = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
-0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
-0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
-0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
-0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
-0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
-0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
-0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
-0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
-0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
-0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
-0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
-0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
-0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
-0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
-0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
-0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
-0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
-0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
-0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
-0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
-0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
-0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
-0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
-0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
-0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
-0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
-0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
-0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
-0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
-0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
-0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
-0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
-0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
-0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
-0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
-0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
-0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
-0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
-0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
-0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
-0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
-0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
-0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
-0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
-0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
-0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
-0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
-0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
-0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
-0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
-0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
-0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
-0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
-0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
-0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
-0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
-0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
-0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
-0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
-0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
-0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
-0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
-0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
-0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
-0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
-0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
-0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
-0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
-0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
-0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
-0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
-0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
-0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
-0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
-0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
-0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
-0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
-0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
-0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
-0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
-0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
-0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
-0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
-0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
-0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
-0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
-0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
-0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
-0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
-0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
-0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
-0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
-0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00,
-};
-
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f29ce3dd61..bfd3f442e5 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -26,6 +26,15 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
+
+ /*
+ * This assumes that all cores using __setup_cpu_603 with
+	 * MMU_FTR_USE_HIGH_BATS are G2_LE compatible.
+ */
+BEGIN_MMU_FTR_SECTION
+ bl setup_g2_le_hid2
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
mtlr r5
blr
_GLOBAL(__setup_cpu_604)
@@ -115,6 +124,16 @@ SYM_FUNC_START_LOCAL(setup_604_hid0)
blr
SYM_FUNC_END(setup_604_hid0)
+/* Enable high BATs for G2_LE and derivatives like e300cX */
+SYM_FUNC_START_LOCAL(setup_g2_le_hid2)
+ mfspr r11,SPRN_HID2_G2_LE
+ oris r11,r11,HID2_G2_LE_HBE@h
+ mtspr SPRN_HID2_G2_LE,r11
+ sync
+ isync
+ blr
+SYM_FUNC_END(setup_g2_le_hid2)
+
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* erratas we work around here.
* Moto MPC710CE.pdf describes them, those are errata
@@ -495,4 +514,3 @@ _GLOBAL(__restore_cpu_setup)
mtcr r7
blr
_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
-
diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h
index ceb06b109f..2ae8e9a7b4 100644
--- a/arch/powerpc/kernel/cpu_specs_e500mc.h
+++ b/arch/powerpc/kernel/cpu_specs_e500mc.h
@@ -8,7 +8,8 @@
#ifdef CONFIG_PPC64
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
- PPC_FEATURE_HAS_FPU | PPC_FEATURE_64)
+ PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \
+ PPC_FEATURE_BOOKE)
#else
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9a3b85bfc8..2086fa6cdc 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>
+#include <asm/fadump.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -92,6 +93,17 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
return csize;
}
+/*
+ * Return true only when the kexec-based kernel dump capturing method is
+ * used. This ensures that restrictions applied to the kdump case are not
+ * automatically applied to the fadump case.
+ */
+bool is_kdump_kernel(void)
+{
+ return !is_fadump_active() && elfcorehdr_addr != ELFCORE_ADDR_MAX;
+}
+EXPORT_SYMBOL_GPL(is_kdump_kernel);
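
A hedged illustration, not from the patch, of the distinction the comment above draws: only a kexec-loaded capture kernel should apply kdump restrictions, since fadump captures from a fully booted production kernel. The function is hypothetical:

/* Placeholder: scale back buffers only in the kexec capture kernel. */
static bool example_use_minimal_buffers(void)
{
	return is_kdump_kernel();	/* false when fadump is active */
}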
+
#ifdef CONFIG_PPC_RTAS
/*
* The crashkernel region will almost always overlap the RTAS region, so
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 438568a472..48773d2d9b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -39,7 +39,7 @@ static int eeh_result_priority(enum pci_ers_result result)
case PCI_ERS_RESULT_NEED_RESET:
return 6;
default:
- WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
+ WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", result);
return 0;
}
};
@@ -60,7 +60,7 @@ static const char *pci_ers_result_name(enum pci_ers_result result)
case PCI_ERS_RESULT_NO_AER_DRIVER:
return "no AER driver";
default:
- WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
+ WARN_ONCE(1, "Unknown result type: %d\n", result);
return "unknown";
}
};
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c33c8ebf86..eaf2f167c3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -893,7 +893,7 @@ __start_interrupts:
*
* Call convention:
*
- * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
+ * syscall register convention is in Documentation/arch/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
/* SCV 0 */
@@ -1952,8 +1952,8 @@ EXC_VIRT_NONE(0x4b00, 0x100)
* Call convention:
*
* syscall and hypercalls register conventions are documented in
- * Documentation/powerpc/syscall64-abi.rst and
- * Documentation/powerpc/papr_hcalls.rst respectively.
+ * Documentation/arch/powerpc/syscall64-abi.rst and
+ * Documentation/arch/powerpc/papr_hcalls.rst respectively.
*
* The intersection of volatile registers that don't contain possible
* inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 3ff2da7b12..d14eda1e85 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -313,7 +313,7 @@ static __init u64 fadump_calculate_reserve_size(void)
* memory at a predefined offset.
*/
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &size, &base);
+ &size, &base, NULL, NULL);
if (ret == 0 && size > 0) {
unsigned long max_size;
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index b32e7b2ebd..9fc90410b3 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -312,13 +312,13 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r11) /* Get Linux PTE */
- li r9, _PAGE_PRESENT | _PAGE_ACCESSED
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
andc. r9, r9, r11 /* Check permission */
bne 5f
- rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
- and r9, r9, r11 /* hwwrite = dirty & rw */
- rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
+ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
+ and r9, r9, r11 /* hwwrite = dirty & w */
+ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
@@ -400,9 +400,9 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
andc. r9, r9, r11 /* Check permission */
bne 5f
- rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
- and r9, r9, r11 /* hwwrite = dirty & rw */
- rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
+ rlwinm r9, r11, 1, _PAGE_WRITE /* dirty => w */
+ and r9, r9, r11 /* hwwrite = dirty & w */
+ rlwimi r11, r9, 0, _PAGE_WRITE /* replace w by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
@@ -561,10 +561,11 @@ finish_tlb_load:
/*
* Clear out the software-only bits in the PTE to generate the
 * TLB_DATA value. These are the bottom 2 bits of the RPN, the
- * top 3 bits of the zone field, and M.
+ * 4 bits of the zone field, and M.
*/
- li r9, 0x0ce2
+ li r9, 0x0cf2
andc r11, r11, r9
+ rlwimi r11, r10, 8, 24, 27 /* Copy 4 upper address bits into zone */
/* load the next available TLB index. */
lwz r9, tlb_4xx_index@l(0)
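
For readers tracking the bit twiddling, here is a rough C model of the new TLB_DATA derivation (a sketch only; the real code operates on live registers inside the TLB-miss handler):

	/* mask 0x0cf2 clears the bottom 2 RPN bits, all 4 zone bits and M */
	static unsigned long tlb_data_40x(unsigned long pte, unsigned long faddr)
	{
		unsigned long val = pte & ~0x0cf2UL;

		/* rlwimi r11,r10,8,24,27: fold the 4 upper bits of the
		 * faulting address into the zone field (bits 4-7) */
		val |= ((faddr >> 28) & 0xfUL) << 4;
		return val;
	}
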
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index a3197c9f72..25642e802e 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -314,8 +314,8 @@ interrupt_base:
* kernel page tables.
*/
lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
+ cmplw cr7, r10, r11
+ blt+ cr7, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
@@ -342,7 +342,7 @@ interrupt_base:
mtspr SPRN_MMUCR,r12
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -355,7 +355,7 @@ interrupt_base:
* place or can we save a couple of instructions here ?
*/
mfspr r12,SPRN_ESR
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
rlwimi r13,r12,10,30,30
/* Load the PTE */
@@ -428,8 +428,8 @@ interrupt_base:
* kernel page tables.
*/
lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
+ cmplw cr7, r10, r11
+ blt+ cr7, 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
@@ -515,6 +515,7 @@ interrupt_base:
* r11 - PTE high word value
* r12 - PTE low word value
* r13 - TLB index
+ * cr7 - Result of comparison with PAGE_OFFSET
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
@@ -533,11 +534,10 @@ finish_tlb_load_44x:
tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
/* And WS 2 */
- li r10,0xf85 /* Mask to apply from PTE */
- rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ li r10,0xf84 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
and r11,r12,r10 /* Mask PTE bits to keep */
- andi. r10,r12,_PAGE_USER /* User page ? */
- beq 1f /* nope, leave U bits empty */
+ bge cr7,1f /* User page ? no, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
@@ -568,8 +568,8 @@ finish_tlb_load_44x:
* kernel page tables.
*/
lis r11,PAGE_OFFSET@h
- cmplw cr0,r10,r11
- blt+ 3f
+ cmplw cr7,r10,r11
+ blt+ cr7,3f
lis r11,swapper_pg_dir@h
ori r11,r11, swapper_pg_dir@l
li r12,0 /* MMUCR = 0 */
@@ -586,7 +586,7 @@ finish_tlb_load_44x:
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -599,7 +599,7 @@ finish_tlb_load_44x:
* place or can we save a couple of instructions here ?
*/
mfspr r12,SPRN_ESR
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
rlwimi r13,r12,10,30,30
/* Load the PTE */
@@ -669,8 +669,8 @@ finish_tlb_load_44x:
* kernel page tables.
*/
lis r11,PAGE_OFFSET@h
- cmplw cr0,r10,r11
- blt+ 3f
+ cmplw cr7,r10,r11
+ blt+ cr7,3f
lis r11,swapper_pg_dir@h
ori r11,r11, swapper_pg_dir@l
li r12,0 /* MMUCR = 0 */
@@ -744,6 +744,7 @@ finish_tlb_load_44x:
* r11 - PTE high word value
* r12 - PTE low word value
* r13 - free to use
+ * cr7 - Result of comparison with PAGE_OFFSET
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
@@ -753,11 +754,10 @@ finish_tlb_load_47x:
tlbwe r11,r13,1
/* And make up word 2 */
- li r10,0xf85 /* Mask to apply from PTE */
- rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ li r10,0xf84 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,31 /* DIRTY,READ -> SW,SR position */
and r11,r12,r10 /* Mask PTE bits to keep */
- andi. r10,r12,_PAGE_USER /* User page ? */
- beq 1f /* nope, leave U bits empty */
+ bge cr7,1f /* User page ? no, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
1: tlbwe r11,r13,2
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index 0f1641a312..39724ff5ae 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -471,7 +471,7 @@ END_BTB_FLUSH_SECTION
4:
/* Mask of required permission bits. Note that while we
- * do copy ESR:ST to _PAGE_RW position as trying to write
+ * do copy ESR:ST to _PAGE_WRITE position as trying to write
* to an RO page is pretty common, we don't do it with
* _PAGE_DIRTY. We could do it, but it's a fairly rare
* event so I'd rather take the overhead when it happens
@@ -485,10 +485,10 @@ END_BTB_FLUSH_SECTION
*/
mfspr r12,SPRN_ESR
#ifdef CONFIG_PTE_64BIT
- li r13,_PAGE_PRESENT
+ li r13,_PAGE_PRESENT|_PAGE_BAP_SR
oris r13,r13,_PAGE_ACCESSED@h
#else
- li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ li r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED
#endif
rlwimi r13,r12,11,29,29
@@ -783,15 +783,15 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
- li r10, (_PAGE_EXEC | _PAGE_PRESENT)
+ li r10, (_PAGE_EXEC | _PAGE_READ)
mr r13, r11
rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
and r12, r11, r10
- andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
+ mcrf cr0, cr5 /* Test for user page */
slwi r10, r12, 1
or r10, r10, r12
rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
- iseleq r12, r12, r10
+ isellt r12, r10, r12
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
mtspr SPRN_MAS3, r13
#endif
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 6764b98ca3..c1d89764dd 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -412,10 +412,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
. = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
@@ -424,12 +424,13 @@ InstructionTLBMiss:
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
rlwinm r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
#endif
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -437,13 +438,15 @@ InstructionTLBMiss:
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- InstructionAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+#ifdef CONFIG_MODULES
+ rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
+#endif
ori r1, r1, 0xe06 /* clear out reserved bits */
- andc r1, r0, r1 /* PP = user? 1 : 0 */
+ andc r1, r2, r1 /* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -478,38 +481,38 @@ InstructionAddressInvalid:
. = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
rlwinm r2, r2, 28, 0xfffff000
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- DataAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
- rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
- rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
+ rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
+ rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
+ rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
ori r1,r1,0xe04 /* clear out reserved bits */
- andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
+ andc r1,r2,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
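
The "PP = user? rw? 1: 3: 0" comment compresses the whole conversion; spelled out as a C truth table it reads as follows (a semantic sketch of that comment only, not of how the handler computes it):

	static unsigned int hw_pp(bool user, bool writable)
	{
		if (!user)
			return 0;		/* kernel mapping */
		return writable ? 1 : 3;	/* user rw : user read-only */
	}
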
@@ -558,34 +561,35 @@ DataAddressInvalid:
. = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
- * r0: scratch
+ * r0: userspace flag (later scratch)
* r1: linux style pte ( later becomes ppc hardware pte )
* r2: ptr to linux-style pte
- * r3: scratch
+ * r3: fault address
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
rlwinm r2, r2, 28, 0xfffff000
+ li r0, 3
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+ li r0, 0
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
beq- DataAddressInvalid /* return if no mapping */
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
- lwz r0,0(r2) /* get linux-style pte */
- andc. r1,r1,r0 /* check access & ~permission */
+ lwz r2,0(r2) /* get linux-style pte */
+ andc. r1,r1,r2 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
- andc r1,r0,r1 /* PP = user? 1: 0 */
+ andc r1,r2,r1 /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -689,7 +693,8 @@ hash_page_dsi:
mfdar r4
mfsrr0 r5
mfsrr1 r9
- rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
+ rlwinm r3, r3, 32 - 15, _PAGE_WRITE /* DSISR_STORE -> _PAGE_WRITE */
+ ori r3, r3, _PAGE_PRESENT | _PAGE_READ
bl hash_page
mfspr r10, SPRN_SPRG_THREAD
restore_regs_thread r10
@@ -699,7 +704,7 @@ hash_page_isi:
mr r11, r10
mfspr r10, SPRN_SPRG_THREAD
save_regs_thread r10
- li r3, 0
+ li r3, _PAGE_PRESENT | _PAGE_EXEC
lwz r4, SRR0(r10)
lwz r9, SRR1(r10)
bl hash_page
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index b1c0418b25..30b56c67fa 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -105,7 +105,6 @@ static struct ctl_table powersave_nap_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {}
};
static int __init
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index bd863702d8..1ad059a9e2 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
- std r11,_NIP(r1)
+ std r11,_LINK(r1)
+ std r11,_NIP(r1) /* Saved LR is also the next instruction */
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
@@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
std r9,GPR13(r1)
SAVE_NVGPRS(r1)
std r11,_XER(r1)
- std r11,_LINK(r1)
std r11,_CTR(r1)
li r11,\trapnr
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 2f29b7d432..6af5359059 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -33,7 +33,7 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u8 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -49,7 +49,7 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u8 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
@@ -64,7 +64,7 @@ void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u16 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -80,7 +80,7 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u16 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
@@ -95,7 +95,7 @@ void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
return;
asm volatile("sync");
do {
- tmp = *port;
+ tmp = *(const volatile u32 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
@@ -111,7 +111,7 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
return;
asm volatile("sync");
do {
- *port = *tbuf++;
+ *(volatile u32 __force *)port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
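
The pattern is identical in all six accessors: the access stays volatile, while the __force cast tells sparse that leaving the __iomem address space here is intentional. A self-contained sketch, assuming a sparse-annotated build:

	static u8 demo_mmio_read(const volatile u8 __iomem *port)
	{
		/* without __force, sparse flags the address-space cast */
		return *(const volatile u8 __force *)port;
	}
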
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 14251bc521..2c0173e709 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1074,10 +1074,10 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
-extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
- struct iommu_table *tbl,
- unsigned long entry, unsigned long *hpa,
- enum dma_data_direction *direction)
+long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+ struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
{
long ret;
unsigned long size = 0;
@@ -1280,13 +1280,21 @@ struct iommu_table_group_ops spapr_tce_table_group_ops = {
/*
* A simple iommu_ops to allow less cruft in generic VFIO code.
*/
-static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
- struct device *dev)
+static int
+spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
+ struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
int ret = -EINVAL;
+ /* At the first attach, the ownership is already set */
+ if (!domain) {
+ iommu_group_put(grp);
+ return 0;
+ }
+
if (!grp)
return -ENODEV;
@@ -1297,17 +1305,22 @@ static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
return ret;
}
-static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
-{
- struct iommu_group *grp = iommu_group_get(dev);
- struct iommu_table_group *table_group;
+static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
+ .attach_dev = spapr_tce_platform_iommu_attach_dev,
+};
- table_group = iommu_group_get_iommudata(grp);
- table_group->ops->release_ownership(table_group);
-}
+static struct iommu_domain spapr_tce_platform_domain = {
+ .type = IOMMU_DOMAIN_PLATFORM,
+ .ops = &spapr_tce_platform_domain_ops,
+};
-static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
- .attach_dev = spapr_tce_blocking_iommu_attach_dev,
+static struct iommu_domain spapr_tce_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ /*
+ * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
+ * also sets the dma_api ops
+ */
+ .ops = &spapr_tce_platform_domain_ops,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1322,29 +1335,13 @@ static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
return false;
}
-static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
-{
- struct iommu_domain *dom;
-
- if (type != IOMMU_DOMAIN_BLOCKED)
- return NULL;
-
- dom = kzalloc(sizeof(*dom), GFP_KERNEL);
- if (!dom)
- return NULL;
-
- dom->ops = &spapr_tce_blocking_domain_ops;
-
- return dom;
-}
-
static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
{
struct pci_dev *pdev;
struct pci_controller *hose;
if (!dev_is_pci(dev))
- return ERR_PTR(-EPERM);
+ return ERR_PTR(-ENODEV);
pdev = to_pci_dev(dev);
hose = pdev->bus->sysdata;
@@ -1371,12 +1368,12 @@ static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
}
static const struct iommu_ops spapr_tce_iommu_ops = {
+ .default_domain = &spapr_tce_platform_domain,
+ .blocked_domain = &spapr_tce_blocked_domain,
.capable = spapr_tce_iommu_capable,
- .domain_alloc = spapr_tce_iommu_domain_alloc,
.probe_device = spapr_tce_iommu_probe_device,
.release_device = spapr_tce_iommu_release_device,
.device_group = spapr_tce_iommu_device_group,
- .set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
};
static struct attribute *spapr_tce_iommu_attrs[] = {
@@ -1393,6 +1390,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
NULL,
};
+void ppc_iommu_register_device(struct pci_controller *phb)
+{
+ iommu_device_sysfs_add(&phb->iommu, phb->parent,
+ spapr_tce_iommu_groups, "iommu-phb%04x",
+ phb->global_number);
+ iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
+ phb->parent);
+}
+
+void ppc_iommu_unregister_device(struct pci_controller *phb)
+{
+ iommu_device_unregister(&phb->iommu);
+ iommu_device_sysfs_remove(&phb->iommu);
+}
+
/*
* This registers IOMMU devices of PHBs. This needs to happen
* after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
@@ -1403,11 +1415,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node) {
- iommu_device_sysfs_add(&hose->iommu, hose->parent,
- spapr_tce_iommu_groups, "iommu-phb%04x",
- hose->global_number);
- iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
- hose->parent);
+ ppc_iommu_register_device(hose);
}
return 0;
}
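
The conversion above follows the generic IOMMU move away from domain_alloc() for singleton domain types: statically defined platform and blocked domains are handed to the core through iommu_ops. In outline (a sketch reusing the names from this diff; not the complete ops table):

	static struct iommu_domain demo_platform_domain = {
		.type = IOMMU_DOMAIN_PLATFORM,
		.ops  = &spapr_tce_platform_domain_ops,
	};

	static const struct iommu_ops demo_ops = {
		.default_domain = &demo_platform_domain, /* replaces domain_alloc() */
		.blocked_domain = &spapr_tce_blocked_domain,
		.probe_device   = spapr_tce_iommu_probe_device,
		.device_group   = spapr_tce_iommu_device_group,
	};
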
diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
index 938e66829e..d5c48d1b0a 100644
--- a/arch/powerpc/kernel/irq_64.c
+++ b/arch/powerpc/kernel/irq_64.c
@@ -230,7 +230,7 @@ again:
* This allows interrupts to be unmasked without hard disabling, and
* also without new hard interrupts coming in ahead of pending ones.
*/
- asm_volatile_goto(
+ asm goto(
"1: \n"
" lbz 9,%0(13) \n"
" cmpwi 9,0 \n"
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index cda4e00b67..7502066c3c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -68,7 +68,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
memblock_set_bottom_up(true);
/*
- * See Documentation/powerpc/ultravisor.rst for more details.
+ * See Documentation/arch/powerpc/ultravisor.rst for more details.
*
* UV/HV data sharing is in PAGE_SIZE granularity. In order to
* minimize the number of pages shared, align the allocation to
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 040255ddb5..d95a48eff4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -521,8 +521,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
* PCI device, it tries to find the PCI device first and calls the
* above routine
*/
-pgprot_t pci_phys_mem_access_prot(struct file *file,
- unsigned long pfn,
+pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
unsigned long size,
pgprot_t prot)
{
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d464ba4120..e67effdba8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -947,7 +947,7 @@ struct option_vector7 {
} __packed;
struct ibm_arch_vec {
- struct { u32 mask, val; } pvrs[14];
+ struct { __be32 mask, val; } pvrs[14];
u8 num_vectors;
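
Typing the pvrs fields as __be32 records that the arch vector is consumed by firmware in big-endian layout, so little-endian kernels must byte-swap on assignment. A hedged sketch (the mask and PVR values below are placeholders, not taken from this file):

	struct { __be32 mask, val; } pvr_entry = {
		.mask = cpu_to_be32(0xffff0000),	/* placeholder mask */
		.val  = cpu_to_be32(0x00800000),	/* placeholder PVR */
	};
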
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 5d7a72b41a..727ed4a145 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -9,10 +10,6 @@
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#include <linux/regset.h>
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 9454b8395b..f38df72e64 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -752,6 +752,8 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
/**
* ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
+ * @m: seq_file output target.
+ * @v: Unused.
*
* Base + size description of a range of RTAS-addressable memory set
* aside for user space to use as work area(s) for certain RTAS
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 20f72cd1d8..9b142b9d51 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -22,7 +22,6 @@
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
-#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
@@ -98,21 +97,6 @@ int boot_cpu_hwid = -1;
int dcache_bsize;
int icache_bsize;
-/*
- * This still seems to be needed... -- paulus
- */
-struct screen_info screen_info = {
- .orig_x = 0,
- .orig_y = 25,
- .orig_video_cols = 80,
- .orig_video_lines = 25,
- .orig_video_isVGA = 1,
- .orig_video_points = 16
-};
-#if defined(CONFIG_FB_VGA16_MODULE)
-EXPORT_SYMBOL(screen_info);
-#endif
-
/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
@@ -601,7 +585,6 @@ struct seq_buf ppc_hw_desc __initdata = {
.buffer = ppc_hw_desc_buf,
.size = sizeof(ppc_hw_desc_buf),
.len = 0,
- .readpos = 0,
};
static __init void probe_machine(void)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 246201d0d8..2f19d5e944 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -364,7 +364,7 @@ void __init early_setup(unsigned long dt_ptr)
*/
initialise_paca(&boot_paca, 0);
fixup_boot_paca(&boot_paca);
- WARN_ON(local_paca != 0);
+ WARN_ON(local_paca);
setup_paca(&boot_paca); /* install the paca into registers */
/* -------- printk is now safe to use ------- */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 68a91e553e..aa17e62f37 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common signal handling code for both 32 and 64 bits
*
* Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
* Extracted from signal_32.c and signal_64.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#include <linux/resume_user_mode.h>
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index a429c57ed4..58ecea1cdc 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -1,10 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
* Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
* Extracted from signal_32.c and signal_64.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
*/
#ifndef _POWERPC_ARCH_SIGNAL_H
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5826f5108a..ab691c89d7 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1051,7 +1051,7 @@ static struct sched_domain_topology_level powerpc_topology[] = {
#endif
{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
{ cpu_mc_mask, SD_INIT_NAME(MC) },
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { cpu_cpu_mask, SD_INIT_NAME(PKG) },
{ NULL, },
};
@@ -1595,7 +1595,7 @@ static void add_cpu_to_masks(int cpu)
/* Skip all CPUs already part of current CPU core mask */
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
- /* If chip_id is -1; limit the cpu_core_mask to within DIE*/
+ /* If chip_id is -1; limit the cpu_core_mask to within PKG */
if (chip_id == -1)
cpumask_and(mask, mask, cpu_cpu_mask(cpu));
@@ -1629,7 +1629,7 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
- rcu_cpu_starting(cpu);
+ rcutree_report_cpu_starting(cpu);
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 20e50586e8..7fab411378 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -294,7 +294,7 @@
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
-235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
237 common epoll_ctl sys_epoll_ctl
238 common epoll_wait sys_epoll_wait
@@ -539,3 +539,7 @@
450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_ni_syscall
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
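
The three futex entries wire up the new futex2 system calls, while map_shadow_stack stays on sys_ni_syscall since powerpc has no shadow-stack support. A hypothetical direct invocation from userspace (the argument order follows the generic futex_wake prototype and should be treated as an assumption; no libc wrapper is assumed):

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static long demo_futex_wake(uint32_t *word)
	{
		/* 454 = futex_wake per the table above; the mask, count and
		 * flags values are illustrative placeholders */
		return syscall(454, word, ~0UL, 1, 0);
	}
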
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index fe3f720c9c..11e062b47d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -157,7 +157,7 @@ static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
-extern void panic_flush_kmsg_start(void)
+void panic_flush_kmsg_start(void)
{
/*
 * These are mostly taken from kernel/panic.c, but try to do
@@ -170,7 +170,7 @@ extern void panic_flush_kmsg_start(void)
bust_spinlocks(1);
}
-extern void panic_flush_kmsg_end(void)
+void panic_flush_kmsg_end(void)
{
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
@@ -1439,10 +1439,12 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return is_kernel_addr(addr);
}
+#endif
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 005269ac32..85846cadb9 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -112,7 +112,7 @@ void __init reserve_crashkernel(void)
total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
/* use common parsing */
ret = parse_crashkernel(boot_command_line, total_mem_sz,
- &crash_size, &crash_base);
+ &crash_size, &crash_base, NULL, NULL);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index a79e28c91e..0bee7ca9a7 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -379,8 +379,8 @@ void default_machine_kexec(struct kimage *image)
#ifdef CONFIG_PPC_64S_HASH_MMU
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base;
-static unsigned long htab_size;
+static __be64 htab_base;
+static __be64 htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index a3de5369d2..961a6dd673 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -32,7 +32,7 @@
#include <asm/plpks.h>
struct umem_info {
- u64 *buf; /* data buffer for usable-memory property */
+ __be64 *buf; /* data buffer for usable-memory property */
u32 size; /* size allocated for the data buffer */
u32 max_entries; /* maximum no. of entries */
u32 idx; /* index of current entry */
@@ -443,10 +443,10 @@ static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
*
* Returns buffer on success, NULL on error.
*/
-static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
+static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
u32 new_size;
- u64 *tbuf;
+ __be64 *tbuf;
if ((um_info->idx + cnt) <= um_info->max_entries)
return um_info->buf;
@@ -1138,11 +1138,15 @@ static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
continue;
ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(dn);
break;
+ }
ret = copy_property(fdt, pci_offset, dn, dmapropname);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(dn);
break;
+ }
}
return ret;
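
The added of_node_put() calls close a device-node reference leak: the for_each-style OF iterators hold a reference on the current node, and any early break must drop it explicitly. The general pattern (a sketch; process() is a hypothetical stand-in):

	for_each_node_with_property(dn, dmapropname) {
		if (process(dn) < 0) {
			of_node_put(dn);	/* drop the iterator's reference */
			break;
		}
	}
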
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 5319d889b1..4bd9d12308 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -87,8 +87,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_p9_perf.o \
+ book3s_hv_nestedv2.o \
+ guest-state-buffer.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
+
+obj-$(CONFIG_GUEST_STATE_BUFFER_TEST) += test-guest-state-buffer.o
endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 686d8d9eda..6cd20ab9e9 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -565,7 +565,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->msr = kvmppc_get_msr(vcpu);
regs->srr0 = kvmppc_get_srr0(vcpu);
regs->srr1 = kvmppc_get_srr1(vcpu);
- regs->pid = vcpu->arch.pid;
+ regs->pid = kvmppc_get_pid(vcpu);
regs->sprg0 = kvmppc_get_sprg0(vcpu);
regs->sprg1 = kvmppc_get_sprg1(vcpu);
regs->sprg2 = kvmppc_get_sprg2(vcpu);
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- *val = get_reg_val(id, VCPU_FPR(vcpu, i));
+ *val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
- *val = get_reg_val(id, vcpu->arch.fp.fpscr);
+ *val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
- val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+ val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+ val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
} else {
r = -ENXIO;
}
@@ -683,19 +683,19 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.fscr);
break;
case KVM_REG_PPC_TAR:
- *val = get_reg_val(id, vcpu->arch.tar);
+ *val = get_reg_val(id, kvmppc_get_tar(vcpu));
break;
case KVM_REG_PPC_EBBHR:
- *val = get_reg_val(id, vcpu->arch.ebbhr);
+ *val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
break;
case KVM_REG_PPC_EBBRR:
- *val = get_reg_val(id, vcpu->arch.ebbrr);
+ *val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
break;
case KVM_REG_PPC_BESCR:
- *val = get_reg_val(id, vcpu->arch.bescr);
+ *val = get_reg_val(id, kvmppc_get_bescr(vcpu));
break;
case KVM_REG_PPC_IC:
- *val = get_reg_val(id, vcpu->arch.ic);
+ *val = get_reg_val(id, kvmppc_get_ic(vcpu));
break;
default:
r = -EINVAL;
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+ kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPSCR:
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
- vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+ kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
} else {
r = -ENXIO;
}
@@ -765,22 +765,22 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
- vcpu->arch.fscr = set_reg_val(id, *val);
+ kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TAR:
- vcpu->arch.tar = set_reg_val(id, *val);
+ kvmppc_set_tar(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBHR:
- vcpu->arch.ebbhr = set_reg_val(id, *val);
+ kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBRR:
- vcpu->arch.ebbrr = set_reg_val(id, *val);
+ kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_BESCR:
- vcpu->arch.bescr = set_reg_val(id, *val);
+ kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IC:
- vcpu->arch.ic = set_reg_val(id, *val);
+ kvmppc_set_ic(vcpu, set_reg_val(id, *val));
break;
default:
r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
index 3b361af873..a9ab92abff 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -19,7 +19,7 @@
/*
* This is a hcall, so register convention is as
- * Documentation/powerpc/papr_hcalls.rst.
+ * Documentation/arch/powerpc/papr_hcalls.rst.
*
* This may also be a syscall from PR-KVM userspace that is to be
* reflected to the PR guest kernel, so registers may be set up for
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index fdfc2a62dd..2b1f0cdd8c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -121,7 +121,7 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
kvm->arch.hpt = *info;
kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
- pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
+ pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n",
info->virt, (long)info->order, kvm->arch.lpid);
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 10aacbf924..175a8eb268 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -97,7 +97,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
{
int lpid = vcpu->kvm->arch.lpid;
- int pid = vcpu->arch.pid;
+ int pid = kvmppc_get_pid(vcpu);
/* This would cause a data segment intr so don't allow the access */
if (eaddr & (0x3FFUL << 52))
@@ -271,7 +271,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Work out effective PID */
switch (eaddr >> 62) {
case 0:
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
break;
case 3:
pid = 0;
@@ -308,7 +308,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid)
+ unsigned int pshift, u64 lpid)
{
unsigned long psize = PAGE_SIZE;
int psi;
@@ -345,7 +345,7 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
{
long rc;
@@ -418,7 +418,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long old;
@@ -469,7 +469,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
- unsigned int lpid)
+ u64 lpid)
{
if (full) {
memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
@@ -490,7 +490,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long im;
pmd_t *p = pmd;
@@ -519,7 +519,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long iu;
pud_t *p = pud;
@@ -540,7 +540,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
pud_free(kvm->mm, pud);
}
-void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
{
unsigned long ig;
@@ -567,7 +567,7 @@ void kvmppc_free_radix(struct kvm *kvm)
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
@@ -583,7 +583,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -609,7 +609,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
@@ -786,7 +786,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
unsigned long pgflags;
unsigned int shift;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 93b695b289..14c6d7e318 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -77,8 +77,8 @@ static void kvm_spapr_tce_liobn_put(struct kref *kref)
call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
-extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
- struct iommu_group *grp)
+void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ struct iommu_group *grp)
{
int i;
struct kvmppc_spapr_tce_table *stt;
@@ -105,8 +105,8 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
rcu_read_unlock();
}
-extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
- struct iommu_group *grp)
+long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ struct iommu_group *grp)
{
struct kvmppc_spapr_tce_table *stt = NULL;
bool found = false;
@@ -786,12 +786,12 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
if (!page) {
- vcpu->arch.regs.gpr[4] = 0;
+ kvmppc_set_gpr(vcpu, 4, 0);
return H_SUCCESS;
}
tbl = (u64 *)page_address(page);
- vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+ kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0429488ba1..b5c6af0bef 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -391,9 +391,27 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31 (PCR_ARCH_300 << 1)
+static inline unsigned long map_pcr_to_cap(unsigned long pcr)
+{
+ unsigned long cap = 0;
+
+ switch (pcr) {
+ case PCR_ARCH_300:
+ cap = H_GUEST_CAP_POWER9;
+ break;
+ case PCR_ARCH_31:
+ cap = H_GUEST_CAP_POWER10;
+ break;
+ default:
+ break;
+ }
+
+ return cap;
+}
+
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
@@ -437,8 +455,20 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
if (guest_pcr_bit > host_pcr_bit)
return -EINVAL;
+ if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+ /*
+ * 'arch_compat == 0' would mean the guest should default to
+ * L1's compatibility. In this case, the guest would pick
+ * the host's PCR and evaluate the corresponding capabilities.
+ */
+ cap = map_pcr_to_cap(guest_pcr_bit);
+ if (!(cap & nested_capabilities))
+ return -EINVAL;
+ }
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR);
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
@@ -794,7 +824,7 @@ static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
- __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+ __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen);
vcpu->arch.vpa.dirty = true;
}
@@ -845,9 +875,9 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+ if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207)
return true;
- if ((!vcpu->arch.vcore->arch_compat) &&
+ if ((!kvmppc_get_arch_compat(vcpu)) &&
cpu_has_feature(CPU_FTR_ARCH_207S))
return true;
return false;
@@ -1267,10 +1297,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
break;
#endif
- case H_RANDOM:
- if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
+ case H_RANDOM: {
+ unsigned long rand;
+
+ if (!arch_get_random_seed_longs(&rand, 1))
ret = H_HARDWARE;
+ kvmppc_set_gpr(vcpu, 4, rand);
break;
+ }
case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -2183,6 +2217,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
vc->lpcr = new_lpcr;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
spin_unlock(&vc->lock);
}
@@ -2279,7 +2314,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
- *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+ *val = get_reg_val(id, kvmppc_get_vtb(vcpu));
break;
case KVM_REG_PPC_DAWR:
*val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
@@ -2306,7 +2341,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.tcscr);
break;
case KVM_REG_PPC_PID:
- *val = get_reg_val(id, vcpu->arch.pid);
+ *val = get_reg_val(id, kvmppc_get_pid(vcpu));
break;
case KVM_REG_PPC_ACOP:
*val = get_reg_val(id, vcpu->arch.acop);
@@ -2338,11 +2373,11 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_TB_OFFSET:
- *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu));
break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
- *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ *val = get_reg_val(id, kvmppc_get_lpcr(vcpu));
break;
case KVM_REG_PPC_PPR:
*val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
@@ -2414,10 +2449,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
#endif
case KVM_REG_PPC_ARCH_COMPAT:
- *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+ *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- *val = get_reg_val(id, vcpu->arch.dec_expires);
+ *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu));
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
@@ -2522,7 +2557,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
- vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+ kvmppc_set_vtb(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWR:
kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
@@ -2552,7 +2587,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.tcscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PID:
- vcpu->arch.pid = set_reg_val(id, *val);
+ kvmppc_set_pid(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ACOP:
vcpu->arch.acop = set_reg_val(id, *val);
@@ -2605,10 +2640,11 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
* decrementer, which is better than a large one that
* causes a hang.
*/
- if (!vcpu->arch.dec_expires && tb_offset)
- vcpu->arch.dec_expires = get_tb() + tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
+ if (!kvmppc_get_dec_expires(vcpu) && tb_offset)
+ kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset);
- vcpu->arch.vcore->tb_offset = tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
break;
}
case KVM_REG_PPC_LPCR:
@@ -2689,7 +2725,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- vcpu->arch.dec_expires = set_reg_val(id, *val);
+ kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
@@ -2922,8 +2958,14 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.shared_big_endian = false;
#endif
#endif
- kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
+ if (kvmhv_is_nestedv2()) {
+ err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
+ if (err < 0)
+ return err;
+ }
+
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
@@ -3079,6 +3121,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
@@ -4043,10 +4087,58 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}
+static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
+{
+ struct kvmhv_nestedv2_io *io;
+ unsigned long msr, i;
+ int trap;
+ long rc;
+
+ io = &vcpu->arch.nestedv2_io;
+
+ msr = mfmsr();
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
+
+ rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
+ if (rc < 0)
+ return -EINVAL;
+
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+ rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
+ &trap, &i);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM Guest Run VCPU hcall failed\n");
+ if (rc == H_INVALID_ELEMENT_ID)
+ pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i);
+ return -EINVAL;
+ }
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+
+ *tb = mftb();
+ kvmppc_gsm_reset(io->vcpu_message);
+ kvmppc_gsm_reset(io->vcore_message);
+ kvmppc_gsbm_zero(&io->valids);
+
+ rc = kvmhv_nestedv2_parse_output(vcpu);
+ if (rc < 0)
+ return -EINVAL;
+
+ timer_rearm_host_dec(*tb);
+
+ return trap;
+}
+
/* call our hypervisor to load up HV regs and go */
static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long host_psscr;
unsigned long msr;
struct hv_guest_state hvregs;
@@ -4126,7 +4218,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
*tb = mftb();
- vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
+ vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu));
timer_rearm_host_dec(*tb);
@@ -4161,7 +4253,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
- trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ if (kvmhv_is_nestedv1())
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ else
+ trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
@@ -4691,7 +4786,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
tb = mftb();
- kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
+ kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu));
trace_kvm_guest_enter(vcpu);
@@ -5147,6 +5242,14 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
if (++cores_done >= kvm->arch.online_vcores)
break;
}
+
+ if (kvmhv_is_nestedv2()) {
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
+ }
+ }
}
void kvmppc_setup_partition_table(struct kvm *kvm)
@@ -5413,15 +5516,43 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
- lpid = kvmppc_alloc_lpid();
- if ((long)lpid < 0)
- return -ENOMEM;
- kvm->arch.lpid = lpid;
+ if (!kvmhv_is_nestedv2()) {
+ lpid = kvmppc_alloc_lpid();
+ if ((long)lpid < 0)
+ return -ENOMEM;
+ kvm->arch.lpid = lpid;
+ }
kvmppc_alloc_host_rm_ops();
kvmhv_vm_nested_init(kvm);
+ if (kvmhv_is_nestedv2()) {
+ long rc;
+ unsigned long guest_id;
+
+ rc = plpar_guest_create(0, &guest_id);
+
+ if (rc != H_SUCCESS)
+ pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc);
+
+ switch (rc) {
+ case H_PARAMETER:
+ case H_FUNCTION:
+ case H_STATE:
+ return -EINVAL;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ case H_NOT_AVAILABLE:
+ return -EBUSY;
+ }
+ kvm->arch.lpid = guest_id;
+ }
+
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -5491,7 +5622,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
return ret;
}
kvmppc_setup_partition_table(kvm);
@@ -5581,10 +5715,14 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm->arch.process_table = 0;
if (kvm->arch.secure_guest)
uv_svm_terminate(kvm->arch.lpid);
- kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ if (!kvmhv_is_nestedv2())
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
kvmppc_free_pimap(kvm);
}
@@ -5996,6 +6134,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
return -ENODEV;
if (!radix_enabled())
return -ENODEV;
+ if (kvmhv_is_nestedv2())
+ return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
if (kvm)
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 95241764df..47b2c81564 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -3,6 +3,8 @@
/*
* Privileged (non-hypervisor) host registers to save.
*/
+#include "asm/guest-state-buffer.h"
+
struct p9_host_os_sprs {
unsigned long iamr;
unsigned long amr;
@@ -54,67 +56,73 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
{
vcpu->arch.shregs.msr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
return vcpu->arch.shregs.msr;
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
{ \
vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
{ \
+ kvmhv_nestedv2_cached_reload(vcpu, iden); \
return vcpu->arch.reg; \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
{ \
vcpu->arch.reg[i] = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden(i)); \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0); \
return vcpu->arch.reg[i]; \
}
-#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \
- KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64)
-
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64)
-KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32)
-
-KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL)
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)
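+
+/*
+ * For illustration, KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
+ * expands to roughly:
+ *
+ * static inline void kvmppc_set_dscr_hv(struct kvm_vcpu *vcpu, u64 val)
+ * {
+ *     vcpu->arch.dscr = val;
+ *     kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DSCR);
+ * }
+ *
+ * static inline u64 kvmppc_get_dscr_hv(struct kvm_vcpu *vcpu)
+ * {
+ *     WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DSCR) < 0);
+ *     return vcpu->arch.dscr;
+ * }
+ *
+ * i.e. every setter records the register as dirty for the L0 hypervisor and
+ * every getter refreshes the cached copy first if it has been invalidated.
+ */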
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 663f5222f3..fa0e3a22ca 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -183,9 +183,13 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
+ unsigned long rand;
+
if (ppc_md.get_random_seed &&
- ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
+ ppc_md.get_random_seed(&rand)) {
+ kvmppc_set_gpr(vcpu, 4, rand);
return H_SUCCESS;
+ }
return H_HARDWARE;
}
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 377d0b4a05..3b658b8696 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -428,10 +428,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
return vcpu->arch.trap;
}
+unsigned long nested_capabilities;
+
long kvmhv_nested_init(void)
{
long int ptb_order;
- unsigned long ptcr;
+ unsigned long ptcr, host_capabilities;
long rc;
if (!kvmhv_on_pseries())
@@ -439,6 +441,29 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
+ rc = plpar_guest_get_capabilities(0, &host_capabilities);
+ if (rc == H_SUCCESS) {
+ unsigned long capabilities = 0;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ capabilities |= H_GUEST_CAP_POWER10;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ capabilities |= H_GUEST_CAP_POWER9;
+
+ nested_capabilities = capabilities & host_capabilities;
+ rc = plpar_guest_set_capabilities(0, nested_capabilities);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)",
+ rc);
+ return -ENODEV;
+ }
+
+ static_branch_enable(&__kvmhv_is_nestedv2);
+ return 0;
+ }
+
+ pr_info("kvm-hv: nestedv2 get capabilities hcall failed, falling back to nestedv1 (rc=%ld)\n",
+ rc);
/* Partition table entry is 1<<4 bytes in size, hence the 4. */
ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
/* Minimum partition table size is 1<<12 bytes */
@@ -478,7 +503,7 @@ void kvmhv_nested_exit(void)
}
}
-static void kvmhv_flush_lpid(unsigned int lpid)
+static void kvmhv_flush_lpid(u64 lpid)
{
long rc;
@@ -500,17 +525,22 @@ static void kvmhv_flush_lpid(unsigned int lpid)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1)
{
if (!kvmhv_on_pseries()) {
mmu_partition_table_set_entry(lpid, dw0, dw1, true);
return;
}
- pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
- pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
- /* L0 will do the necessary barriers */
- kvmhv_flush_lpid(lpid);
+ if (kvmhv_is_nestedv1()) {
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+ }
+
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
new file mode 100644
index 0000000000..f354af7e85
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
+ *
+ * Authors:
+ * Jordan Niethe <jniethe5@gmail.com>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors as a NESTEDv2 guest.
+ *
+ */
+
+#include "linux/blk-mq.h"
+#include "linux/console.h"
+#include "linux/gfp_types.h"
+#include "linux/signal.h"
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/hvcall.h>
+#include <asm/pgalloc.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/guest-state-buffer.h>
+#include "trace_hv.h"
+
+struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
+EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
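+
+/*
+ * This static key backs kvmhv_is_nestedv2()/kvmhv_is_nestedv1(); it is
+ * flipped once in kvmhv_nested_init() when the L0 advertises the v2 API,
+ * so the checks scattered through book3s_hv.c reduce to a patched branch
+ * rather than a runtime load.
+ */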
+
+static size_t
+gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
+{
+ u16 ids[] = {
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_RUN_OUTPUT,
+ };
+ size_t size = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ int rc;
+
+ cfg = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ cfg->vcpu_run_output_size);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
+ cfg->vcpu_run_input_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ cfg = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (gse)
+ cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops config_msg_ops = {
+ .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
+ .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
+ .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
+};
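+
+/*
+ * Sketch of how these ops are consumed (mirroring
+ * kvmhv_nestedv2_host_create() below; names as in this file):
+ *
+ * gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
+ *                      GFP_KERNEL);
+ * gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id, GFP_KERNEL);
+ * rc = kvmppc_gsm_fill_info(gsm, gsb);   - dispatches to .fill_info
+ */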
+
+static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ size_t size = 0;
+ u16 iden;
+
+ kvmppc_gsbm_fill(&gsbm);
+ kvmppc_gsbm_for_each(&gsbm, iden)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_PARTITION_TABLE:
+ case KVMPPC_GSID_PROCESS_TABLE:
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ break;
+ default:
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
+ }
+ }
+ return size;
+}
+
+static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvm_vcpu *vcpu;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+ u32 arch_compat = 0;
+
+ vcpu = gsm->data;
+
+ kvmppc_gsm_for_each(gsm, iden)
+ {
+ rc = 0;
+
+ if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
+ (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
+ continue;
+
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
+ break;
+ case KVMPPC_GSID_PURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
+ break;
+ case KVMPPC_GSID_SPURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
+ break;
+ case KVMPPC_GSID_AMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
+ break;
+ case KVMPPC_GSID_SIAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
+ break;
+ case KVMPPC_GSID_SDAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
+ break;
+ case KVMPPC_GSID_IAMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
+ break;
+ case KVMPPC_GSID_CIABR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
+ break;
+ case KVMPPC_GSID_WORT:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
+ break;
+ case KVMPPC_GSID_PPR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
+ break;
+ case KVMPPC_GSID_PSPB:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
+ break;
+ case KVMPPC_GSID_TAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
+ break;
+ case KVMPPC_GSID_FSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
+ break;
+ case KVMPPC_GSID_BESCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
+ break;
+ case KVMPPC_GSID_IC:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
+ break;
+ case KVMPPC_GSID_CTRL:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
+ break;
+ case KVMPPC_GSID_PIDR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
+ break;
+ case KVMPPC_GSID_AMOR: {
+ u64 amor = ~0;
+
+ rc = kvmppc_gse_put_u64(gsb, iden, amor);
+ break;
+ }
+ case KVMPPC_GSID_VRSAVE:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.gpr[i]);
+ break;
+ case KVMPPC_GSID_CR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
+ break;
+ case KVMPPC_GSID_XER:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
+ break;
+ case KVMPPC_GSID_CTR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
+ break;
+ case KVMPPC_GSID_LR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.link);
+ break;
+ case KVMPPC_GSID_NIA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
+ break;
+ case KVMPPC_GSID_SRR0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr0);
+ break;
+ case KVMPPC_GSID_SRR1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr1);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg0);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg1);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg2);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg3);
+ break;
+ case KVMPPC_GSID_DAR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.dar);
+ break;
+ case KVMPPC_GSID_DSISR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.shregs.dsisr);
+ break;
+ case KVMPPC_GSID_MSR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.msr);
+ break;
+ case KVMPPC_GSID_VTB:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->vtb);
+ break;
+ case KVMPPC_GSID_LPCR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->lpcr);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->tb_offset);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&v, &vcpu->arch.fp.fpr[i],
+ sizeof(vcpu->arch.fp.fpr[i]));
+ rc = kvmppc_gse_put_vector128(gsb, iden, &v);
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ rc = kvmppc_gse_put_vector128(gsb, iden,
+ &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = vcpu->arch.dec_expires -
+ vcpu->arch.vcore->tb_offset;
+ rc = kvmppc_gse_put_u64(gsb, iden, dw);
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ /*
+ * Though 'arch_compat == 0' would mean the default
+ * compatibility, arch_compat, being a Guest Wide
+ * Element, cannot be filled with a value of 0 in the
+ * GSB as this would result in a kernel trap.
+ * Hence, when 'arch_compat == 0', arch_compat should
+ * default to L1's PVR.
+ */
+ if (!vcpu->arch.vcore->arch_compat) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ arch_compat = PVR_ARCH_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
+ arch_compat = PVR_ARCH_300;
+ } else {
+ arch_compat = vcpu->arch.vcore->arch_compat;
+ }
+ rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
+ break;
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_gs_elem *gse;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ kvmppc_gsp_for_each(&gsp, iden, gse)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PURR:
+ vcpu->arch.purr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPURR:
+ vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_AMR:
+ vcpu->arch.amr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIAR:
+ vcpu->arch.siar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SDAR:
+ vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IAMR:
+ vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_CIABR:
+ vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_WORT:
+ vcpu->arch.wort = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_PPR:
+ vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PSPB:
+ vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_TAR:
+ vcpu->arch.tar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FSCR:
+ vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_BESCR:
+ vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IC:
+ vcpu->arch.ic = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTRL:
+ vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PIDR:
+ vcpu->arch.pid = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_AMOR:
+ break;
+ case KVMPPC_GSID_VRSAVE:
+ vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CR:
+ vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_XER:
+ vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTR:
+ vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LR:
+ vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_NIA:
+ vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR0:
+ vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR1:
+ vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAR:
+ vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DSISR:
+ vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MSR:
+ vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VTB:
+ vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LPCR:
+ vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ kvmppc_gse_get_vector128(gse, &v);
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&vcpu->arch.fp.fpr[i], &v,
+ sizeof(vcpu->arch.fp.fpr[i]));
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_HDAR:
+ vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HDSISR:
+ vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_ASDR:
+ vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HEIR:
+ vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = kvmppc_gse_get_u64(gse);
+ vcpu->arch.dec_expires =
+ dw + vcpu->arch.vcore->tb_offset;
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
+ break;
+ default:
+ continue;
+ }
+ kvmppc_gsbm_set(valids, iden);
+ }
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops vcpu_message_ops = {
+ .get_size = gs_msg_ops_vcpu_get_size,
+ .fill_info = gs_msg_ops_vcpu_fill_info,
+ .refresh_info = gs_msg_ops_vcpu_refresh_info,
+};
+
+static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
+ unsigned long guest_id, vcpu_id;
+ struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
+ int rc;
+
+ cfg = &io->cfg;
+ guest_id = vcpu->kvm->arch.lpid;
+ vcpu_id = vcpu->vcpu_id;
+
+ gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
+ GFP_KERNEL);
+ if (!gsm) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
+ GFP_KERNEL);
+ if (!gsb) {
+ rc = -ENOMEM;
+ goto free_gsm;
+ }
+
+ rc = kvmppc_gsb_receive_datum(gsb, gsm,
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
+ goto free_gsb;
+ }
+
+ vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_output) {
+ rc = -ENOMEM;
+ goto free_gsb;
+ }
+
+ cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
+ cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
+ io->vcpu_run_output = vcpu_run_output;
+
+ gsm->flags = 0;
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
+ goto free_gs_out;
+ }
+
+ vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
+ if (!vcpu_message) {
+ rc = -ENOMEM;
+ goto free_gs_out;
+ }
+ kvmppc_gsm_include_all(vcpu_message);
+
+ io->vcpu_message = vcpu_message;
+
+ vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_input) {
+ rc = -ENOMEM;
+ goto free_vcpu_message;
+ }
+
+ io->vcpu_run_input = vcpu_run_input;
+ cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
+ cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
+ goto free_vcpu_run_input;
+ }
+
+ vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
+ KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
+ if (!vcore_message) {
+ rc = -ENOMEM;
+ goto free_vcpu_run_input;
+ }
+
+ kvmppc_gsm_include_all(vcore_message);
+ kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
+ io->vcore_message = vcore_message;
+
+ kvmppc_gsbm_fill(&io->valids);
+ kvmppc_gsm_free(gsm);
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_vcpu_run_input:
+ kvmppc_gsb_free(vcpu_run_input);
+free_vcpu_message:
+ kvmppc_gsm_free(vcpu_message);
+free_gs_out:
+ kvmppc_gsb_free(vcpu_run_output);
+free_gsb:
+ kvmppc_gsb_free(gsb);
+free_gsm:
+ kvmppc_gsm_free(gsm);
+err:
+ return rc;
+}
+
+/**
+ * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Mark a guest state ID as having been changed by the L1 host and thus
+ * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu().
+ */
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_msg *gsm;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ gsm = io->vcpu_message;
+ kvmppc_gsm_include(gsm, iden);
+ gsm = io->vcore_message;
+ kvmppc_gsm_include(gsm, iden);
+ kvmppc_gsbm_set(valids, iden);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
+
+/**
+ * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Reload the value for the guest state ID from the L0 host into the L1 host.
+ * This is cached so that going out to the L0 host only happens if necessary.
+ */
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ if (kvmppc_gsbm_test(valids, iden))
+ return 0;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
+ rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
+
+/**
+ * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
+ * @vcpu: vcpu
+ * @time_limit: hdec expiry tb
+ *
+ * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
+ * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
+ * wide values need to be sent with H_GUEST_SET first.
+ *
+ * The hdec expiry tb is always sent to the L0 host.
+ */
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg *gsm;
+ int rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_input;
+ gsm = io->vcore_message;
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
+ return rc;
+ }
+
+ gsm = io->vcpu_message;
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
+ return rc;
+ }
+
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
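+
+/*
+ * Sketch of the expected ordering around a guest run (the H_GUEST_RUN_VCPU
+ * hcall itself is issued on the run path, outside this file):
+ *
+ * kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);  - push dirty state to L0
+ * ... H_GUEST_RUN_VCPU ...
+ * kvmhv_nestedv2_parse_output(vcpu);            - pull exit state back
+ */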
+
+/**
+ * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
+ * L0 host
+ * @lpid: guest id
+ * @dw0: partition table double word
+ * @dw1: process table double word
+ */
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
+{
+ struct kvmppc_gs_part_table patbl;
+ struct kvmppc_gs_proc_table prtbl;
+ struct kvmppc_gs_buff *gsb;
+ size_t size;
+ int rc;
+
+ size = kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
+ kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
+ sizeof(struct kvmppc_gs_header);
+ gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
+ if (!gsb)
+ return -ENOMEM;
+
+ patbl.address = dw0 & RPDB_MASK;
+ patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+ ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
+ 31);
+ patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
+ rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ prtbl.address = dw1 & PRTB_MASK;
+ prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
+ rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
+ goto free_gsb;
+ }
+
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_gsb:
+ kvmppc_gsb_free(gsb);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
+
+/**
+ * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
+ * @vcpu: vcpu
+ *
+ * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
+ */
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_output;
+
+ vcpu->arch.fault_dar = 0;
+ vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_gpa = 0;
+ vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
+
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+ return kvmppc_gsm_refresh_info(&gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
+
+static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmppc_gsm_free(io->vcpu_message);
+ kvmppc_gsm_free(io->vcore_message);
+ kvmppc_gsb_free(io->vcpu_run_input);
+ kvmppc_gsb_free(io->vcpu_run_output);
+}
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc = 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+
+ for (int i = 0; i < 32; i++) {
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
+ }
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
+
+ rc = kvmppc_gsb_receive_data(gsb, &gsm);
+ if (rc < 0)
+ pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
+
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ for (int i = 0; i < 32; i++)
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
+
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
+
+/**
+ * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ *
+ * Create the vcpu in the L0 hypervisor with the H_GUEST_CREATE_VCPU hcall
+ * and set up its guest state input and output buffers.
+ */
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ long rc;
+
+ rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
+ switch (rc) {
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ rc = kvmhv_nestedv2_host_create(vcpu, io);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
+
+/**
+ * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ */
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmhv_nestedv2_host_free(vcpu, io);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
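+
+/*
+ * Overall lifecycle of the NESTEDv2 io state, as a sketch:
+ *
+ * kvmhv_nestedv2_vcpu_create(vcpu, io);  - H_GUEST_CREATE_VCPU + buffers
+ * ... per run: kvmhv_nestedv2_flush_vcpu() then kvmhv_nestedv2_parse_output()
+ * kvmhv_nestedv2_vcpu_free(vcpu, io);    - tear the buffers back down
+ */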
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 34f1db2128..34bc0a8a12 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -305,7 +305,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
u32 pid;
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* Prior memory accesses to host PID Q3 must be completed before we
@@ -330,7 +330,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
int i;
lpid = kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* See switch_mmu_to_guest_radix. ptesync should not be required here
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 82be6d8751..9012acadbc 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -174,14 +174,14 @@ long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
ppc_md.hmi_exception_early(NULL);
out:
- if (vc->tb_offset) {
+ if (kvmppc_get_tb_offset(vcpu)) {
u64 new_tb = mftb() + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
new_tb += 0x1000000;
mtspr(SPRN_TBU40, new_tb);
}
- vc->tb_offset_applied = vc->tb_offset;
+ vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu);
}
return ret;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9182324dbe..17cb75a127 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -776,8 +776,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
r &= ~HPTE_GR_RESERVED;
}
- vcpu->arch.regs.gpr[4 + i * 2] = v;
- vcpu->arch.regs.gpr[5 + i * 2] = r;
+ kvmppc_set_gpr(vcpu, 4 + i * 2, v);
+ kvmppc_set_gpr(vcpu, 5 + i * 2, r);
}
return H_SUCCESS;
}
@@ -824,7 +824,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
@@ -872,7 +872,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
kvmppc_set_dirty_from_hpte(kvm, v, gr);
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index e165bfa842..e429848785 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -481,7 +481,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
- vcpu->arch.regs.gpr[5] = get_tb();
+ kvmppc_set_gpr(vcpu, 5, get_tb());
return xics_rm_h_xirr(vcpu);
}
@@ -518,7 +518,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
} while (!icp_rm_try_update(icp, old_state, new_state));
/* Return the result in GPR4 */
- vcpu->arch.regs.gpr[4] = xirr;
+ kvmppc_set_gpr(vcpu, 4, xirr);
return check_too_hard(xics, icp);
}
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2d6f9327f..92f3311514 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -858,7 +858,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
}
kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- pr_info("LPID %d went secure\n", kvm->arch.lpid);
+ pr_info("LPID %lld went secure\n", kvm->arch.lpid);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f4115819e7..29a3822497 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -328,7 +328,7 @@ static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
*/
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (old_cppr << 24));
return H_SUCCESS;
}
@@ -364,7 +364,7 @@ static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server
hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24));
return H_SUCCESS;
}
@@ -884,10 +884,10 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
}
if (single_escalation)
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d",
vcpu->kvm->arch.lpid, xc->server_num);
else
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d",
vcpu->kvm->arch.lpid, xc->server_num, prio);
if (!name) {
pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
@@ -2779,8 +2779,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
-
/* The VM should have configured XICS mode before doing XICS hcalls. */
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
@@ -2799,7 +2797,7 @@ int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_XIRR_X:
xive_vm_h_xirr(vcpu);
- kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + kvmppc_get_tb_offset(vcpu));
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 712ab91ced..6e2ebbd8aa 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -567,7 +567,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
u8 priority;
struct kvm_ppc_xive_eq kvm_eq;
int rc;
- __be32 *qaddr = 0;
+ __be32 *qaddr = NULL;
struct page *page;
struct xive_q *q;
gfn_t gfn;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 059c08ae03..077fd88a0b 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -92,7 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_host_swabbed = 0;
emulated = EMULATE_FAIL;
- vcpu->arch.regs.msr = vcpu->arch.shared->msr;
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -250,7 +251,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(vcpu,
- VCPU_FPR(vcpu, op.reg), size, 1);
+ kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -357,6 +358,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
+ kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c
new file mode 100644
index 0000000000..b80dbc5862
--- /dev/null
+++ b/arch/powerpc/kvm/guest-state-buffer.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/hvcall.h"
+#include <linux/log2.h>
+#include <asm/pgalloc.h>
+#include <asm/guest-state-buffer.h>
+
+static const u16 kvmppc_gse_iden_len[__KVMPPC_GSE_TYPE_MAX] = {
+ [KVMPPC_GSE_BE32] = sizeof(__be32),
+ [KVMPPC_GSE_BE64] = sizeof(__be64),
+ [KVMPPC_GSE_VEC128] = sizeof(vector128),
+ [KVMPPC_GSE_PARTITION_TABLE] = sizeof(struct kvmppc_gs_part_table),
+ [KVMPPC_GSE_PROCESS_TABLE] = sizeof(struct kvmppc_gs_proc_table),
+ [KVMPPC_GSE_BUFFER] = sizeof(struct kvmppc_gs_buff_info),
+};
+
+/**
+ * kvmppc_gsb_new() - create a new guest state buffer
+ * @size: total size of the guest state buffer (includes header)
+ * @guest_id: guest_id
+ * @vcpu_id: vcpu_id
+ * @flags: GFP flags
+ *
+ * Returns a guest state buffer.
+ */
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags)
+{
+ struct kvmppc_gs_buff *gsb;
+
+ gsb = kzalloc(sizeof(*gsb), flags);
+ if (!gsb)
+ return NULL;
+
+ size = roundup_pow_of_two(size);
+ gsb->hdr = kzalloc(size, flags);
+ if (!gsb->hdr)
+ goto free;
+
+ gsb->capacity = size;
+ gsb->len = sizeof(struct kvmppc_gs_header);
+ gsb->vcpu_id = vcpu_id;
+ gsb->guest_id = guest_id;
+
+ gsb->hdr->nelems = cpu_to_be32(0);
+
+ return gsb;
+
+free:
+ kfree(gsb);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_new);
+
+/**
+ * kvmppc_gsb_free() - free a guest state buffer
+ * @gsb: guest state buffer
+ */
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb)
+{
+ kfree(gsb->hdr);
+ kfree(gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_free);
+
+/**
+ * kvmppc_gsb_put() - allocate space in a guest state buffer
+ * @gsb: buffer to allocate in
+ * @size: amount of space to allocate
+ *
+ * Returns a pointer to the requested amount of space in the buffer and
+ * increments the count of elements in the buffer.
+ *
+ * Does not check if there is enough space in the buffer.
+ */
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size)
+{
+ u32 nelems = kvmppc_gsb_nelems(gsb);
+ void *p;
+
+ p = (void *)kvmppc_gsb_header(gsb) + kvmppc_gsb_len(gsb);
+ gsb->len += size;
+
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(nelems + 1);
+ return p;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_put);
+
+static int kvmppc_gsid_class(u16 iden)
+{
+ if ((iden >= KVMPPC_GSE_GUESTWIDE_START) &&
+ (iden <= KVMPPC_GSE_GUESTWIDE_END))
+ return KVMPPC_GS_CLASS_GUESTWIDE;
+
+ if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END))
+ return KVMPPC_GS_CLASS_META;
+
+ if ((iden >= KVMPPC_GSE_DW_REGS_START) &&
+ (iden <= KVMPPC_GSE_DW_REGS_END))
+ return KVMPPC_GS_CLASS_DWORD_REG;
+
+ if ((iden >= KVMPPC_GSE_W_REGS_START) &&
+ (iden <= KVMPPC_GSE_W_REGS_END))
+ return KVMPPC_GS_CLASS_WORD_REG;
+
+ if ((iden >= KVMPPC_GSE_VSRS_START) && (iden <= KVMPPC_GSE_VSRS_END))
+ return KVMPPC_GS_CLASS_VECTOR;
+
+ if ((iden >= KVMPPC_GSE_INTR_REGS_START) &&
+ (iden <= KVMPPC_GSE_INTR_REGS_END))
+ return KVMPPC_GS_CLASS_INTR;
+
+ return -1;
+}
+
+static int kvmppc_gsid_type(u16 iden)
+{
+ int type = -1;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_TB_OFFSET:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_PARTITION_TABLE:
+ type = KVMPPC_GSE_PARTITION_TABLE;
+ break;
+ case KVMPPC_GSID_PROCESS_TABLE:
+ type = KVMPPC_GSE_PROCESS_TABLE;
+ break;
+ case KVMPPC_GSID_LOGICAL_PVR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_META:
+ switch (iden) {
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ type = KVMPPC_GSE_BUFFER;
+ break;
+ case KVMPPC_GSID_VPA:
+ type = KVMPPC_GSE_BE64;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GS_CLASS_WORD_REG:
+ type = KVMPPC_GSE_BE32;
+ break;
+ case KVMPPC_GS_CLASS_VECTOR:
+ type = KVMPPC_GSE_VEC128;
+ break;
+ case KVMPPC_GS_CLASS_INTR:
+ switch (iden) {
+ case KVMPPC_GSID_HDAR:
+ case KVMPPC_GSID_ASDR:
+ case KVMPPC_GSID_HEIR:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_HDSISR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ }
+
+ return type;
+}
+
+/**
+ * kvmppc_gsid_flags() - the flags for a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns any flags for the guest state ID.
+ */
+unsigned long kvmppc_gsid_flags(u16 iden)
+{
+ unsigned long flags = 0;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ flags = KVMPPC_GS_FLAGS_WIDE;
+ break;
+ case KVMPPC_GS_CLASS_META:
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ case KVMPPC_GS_CLASS_WORD_REG:
+ case KVMPPC_GS_CLASS_VECTOR:
+ case KVMPPC_GS_CLASS_INTR:
+ break;
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_flags);
+
+/**
+ * kvmppc_gsid_size() - the size of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns the size of guest state ID.
+ */
+u16 kvmppc_gsid_size(u16 iden)
+{
+ int type;
+
+ type = kvmppc_gsid_type(iden);
+ if (type == -1)
+ return 0;
+
+ if (type >= __KVMPPC_GSE_TYPE_MAX)
+ return 0;
+
+ return kvmppc_gse_iden_len[type];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_size);
+
+/**
+ * kvmppc_gsid_mask() - the settable bits of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns a mask of settable bits for a guest state ID.
+ */
+u64 kvmppc_gsid_mask(u16 iden)
+{
+ u64 mask = ~0ull;
+
+ switch (iden) {
+ case KVMPPC_GSID_LPCR:
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER |
+ LPCR_GTSE;
+ break;
+ case KVMPPC_GSID_MSR:
+ mask = ~(MSR_HV | MSR_S | MSR_ME);
+ break;
+ }
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_mask);
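+
+/*
+ * A caller would typically sanitise a value before forwarding it, e.g.
+ * (illustrative):
+ *
+ * val &= kvmppc_gsid_mask(KVMPPC_GSID_LPCR);
+ */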
+
+/**
+ * __kvmppc_gse_put() - add a guest state element to a buffer
+ * @gsb: buffer to add the element to
+ * @iden: guest state ID
+ * @size: length of data
+ * @data: pointer to data
+ */
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data)
+{
+ struct kvmppc_gs_elem *gse;
+ u16 total_size;
+
+ total_size = sizeof(*gse) + size;
+ if (total_size + kvmppc_gsb_len(gsb) > kvmppc_gsb_capacity(gsb))
+ return -ENOMEM;
+
+ if (kvmppc_gsid_size(iden) != size)
+ return -EINVAL;
+
+ gse = kvmppc_gsb_put(gsb, total_size);
+ gse->iden = cpu_to_be16(iden);
+ gse->len = cpu_to_be16(size);
+ memcpy(gse->data, data, size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmppc_gse_put);
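+
+/*
+ * The typed helpers (kvmppc_gse_put_u64(), kvmppc_gse_put_u32(), ...) from
+ * guest-state-buffer.h wrap this; adding one element is then, as a sketch:
+ *
+ * rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_TB_OFFSET, tb_offset);
+ * if (rc < 0)
+ *     return rc;   - buffer full (-ENOMEM) or size mismatch (-EINVAL)
+ */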
+
+/**
+ * kvmppc_gse_parse() - create a parse map from a guest state buffer
+ * @gsp: guest state parser
+ * @gsb: guest state buffer
+ */
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_elem *curr;
+ int rem, i;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ if (kvmppc_gse_len(curr) !=
+ kvmppc_gsid_size(kvmppc_gse_iden(curr)))
+ return -EINVAL;
+ kvmppc_gsp_insert(gsp, kvmppc_gse_iden(curr), curr);
+ }
+
+ if (kvmppc_gsb_nelems(gsb) != i)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gse_parse);
+
+static inline int kvmppc_gse_flatten_iden(u16 iden)
+{
+ int bit = 0;
+ int class;
+
+ class = kvmppc_gsid_class(iden);
+
+ if (class == KVMPPC_GS_CLASS_GUESTWIDE) {
+ bit += iden - KVMPPC_GSE_GUESTWIDE_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_META) {
+ bit += iden - KVMPPC_GSE_META_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_META_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_DWORD_REG) {
+ bit += iden - KVMPPC_GSE_DW_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_WORD_REG) {
+ bit += iden - KVMPPC_GSE_W_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_W_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_VECTOR) {
+ bit += iden - KVMPPC_GSE_VSRS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_VSRS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_INTR) {
+ bit += iden - KVMPPC_GSE_INTR_REGS_START;
+ return bit;
+ }
+
+ return 0;
+}
+
+static inline u16 kvmppc_gse_unflatten_iden(int bit)
+{
+ u16 iden;
+
+ if (bit < KVMPPC_GSE_GUESTWIDE_COUNT) {
+ iden = KVMPPC_GSE_GUESTWIDE_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (bit < KVMPPC_GSE_META_COUNT) {
+ iden = KVMPPC_GSE_META_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_META_COUNT;
+
+ if (bit < KVMPPC_GSE_DW_REGS_COUNT) {
+ iden = KVMPPC_GSE_DW_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_W_REGS_COUNT) {
+ iden = KVMPPC_GSE_W_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_W_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_VSRS_COUNT) {
+ iden = KVMPPC_GSE_VSRS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_VSRS_COUNT;
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT) {
+ iden = KVMPPC_GSE_INTR_REGS_START + bit;
+ return iden;
+ }
+
+ return 0;
+}
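+
+/*
+ * For every recognised ID the two helpers are inverses:
+ *
+ * kvmppc_gse_unflatten_iden(kvmppc_gse_flatten_iden(iden)) == iden
+ *
+ * which is what lets the parser and the bitmaps index the sparse ID space
+ * with a dense bit position.
+ */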
+
+/**
+ * kvmppc_gsp_insert() - add a mapping from a guest state ID to an element
+ * @gsp: guest state parser
+ * @iden: guest state id (key)
+ * @gse: guest state element (value)
+ */
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ kvmppc_gsbm_set(&gsp->iterator, iden);
+ gsp->gses[i] = gse;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_insert);
+
+/**
+ * kvmppc_gsp_lookup() - lookup an element from a guest state ID
+ * @gsp: guest state parser
+ * @iden: guest state ID (key)
+ *
+ * Returns the guest state element if present.
+ */
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp, u16 iden)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ return gsp->gses[i];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_lookup);
+
+/**
+ * kvmppc_gsbm_set() - set the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ set_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_set);
+
+/**
+ * kvmppc_gsbm_clear() - clear the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ clear_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_clear);
+
+/**
+ * kvmppc_gsbm_test() - test the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ return test_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_test);
+
+/**
+ * kvmppc_gsbm_next() - return the next set guest state ID
+ * @gsbm: guest state bitmap
+ * @prev: last guest state ID
+ */
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev)
+{
+ int bit, pbit;
+
+ pbit = prev ? kvmppc_gse_flatten_iden(prev) + 1 : 0;
+ bit = find_next_bit(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT, pbit);
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT)
+ return kvmppc_gse_unflatten_iden(bit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_next);
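+
+/*
+ * Iterating all set IDs follows the usual find-next pattern; the
+ * kvmppc_gsbm_for_each() macro used elsewhere presumably expands to
+ * something like:
+ *
+ * for (iden = kvmppc_gsbm_next(gsbm, 0); iden;
+ *      iden = kvmppc_gsbm_next(gsbm, iden))
+ *     ...
+ */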
+
+/**
+ * kvmppc_gsm_init() - initialize a guest state message
+ * @gsm: guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ */
+int kvmppc_gsm_init(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags)
+{
+ memset(gsm, 0, sizeof(*gsm));
+ gsm->ops = ops;
+ gsm->data = data;
+ gsm->flags = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_init);
+
+/**
+ * kvmppc_gsm_new() - creates a new guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ * @gfp_flags: GFP allocation flags
+ *
+ * Returns an initialized guest state message.
+ */
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags)
+{
+ struct kvmppc_gs_msg *gsm;
+
+ gsm = kzalloc(sizeof(*gsm), gfp_flags);
+ if (!gsm)
+ return NULL;
+
+ kvmppc_gsm_init(gsm, ops, data, flags);
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_new);
+
+/**
+ * kvmppc_gsm_size() - get the size required for the guest state message
+ * @gsm: self
+ *
+ * Returns the size required for the message.
+ */
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm)
+{
+ if (gsm->ops->get_size)
+ return gsm->ops->get_size(gsm);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_size);
+
+/**
+ * kvmppc_gsm_free() - free guest state message
+ * @gsm: guest state message
+ */
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm)
+{
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_free);
+
+/**
+ * kvmppc_gsm_fill_info() - serialises message to guest state buffer format
+ * @gsm: self
+ * @gsb: buffer to serialise into
+ */
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->fill_info)
+ return -EINVAL;
+
+ return gsm->ops->fill_info(gsb, gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_fill_info);
+
+/**
+ * kvmppc_gsm_refresh_info() - deserialises from guest state buffer
+ * @gsm: self
+ * @gsb: buffer to deserialise from
+ */
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->refresh_info)
+ return -EINVAL;
+
+ return gsm->ops->refresh_info(gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_refresh_info);
+
+/**
+ * kvmppc_gsb_send() - send all elements in the buffer to the hypervisor
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_SET_STATE hcall for the guest state buffer.
+ */
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (kvmppc_gsb_nelems(gsb) == 0)
+ return 0;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_send);
+
+/**
+ * kvmppc_gsb_recv() - request updated values for all elements in the buffer
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_GET_STATE hcall for the guest state buffer.
+ * After returning from the hcall the guest state elements that were
+ * present in the buffer will have updated values from the hypervisor.
+ */
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_recv);
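+
+/*
+ * Putting the pieces together, a minimal H_GUEST_GET_STATE round trip for a
+ * single guest-wide element looks roughly like (error handling elided):
+ *
+ * struct kvmppc_gs_parser gsp = { 0 };
+ * struct kvmppc_gs_elem *gse;
+ * struct kvmppc_gs_buff *gsb;
+ *
+ * gsb = kvmppc_gsb_new(kvmppc_gse_total_size(
+ *               kvmppc_gsid_size(KVMPPC_GSID_TB_OFFSET)) +
+ *               sizeof(struct kvmppc_gs_header), guest_id, 0, GFP_KERNEL);
+ * kvmppc_gse_put_u64(gsb, KVMPPC_GSID_TB_OFFSET, 0);
+ * kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_WIDE);
+ * kvmppc_gse_parse(&gsp, gsb);
+ * gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_TB_OFFSET);
+ * if (gse)
+ *     tb_offset = kvmppc_gse_get_u64(gse);
+ * kvmppc_gsb_free(gsb);
+ */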
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7197c82566..f6af752698 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
}
}
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, 0) = gpr;
- VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
}
}
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
- VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
- VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
}
}
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
- val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
- VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
}
}
#endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx16val[offset] = gpr16;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx8val[offset] = gpr8;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
}
if (rs < 32) {
- *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
- reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsx32val[vsx_offset];
}
break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsxval[vmx_offset];
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx32val[vmx_offset];
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx16val[vmx_offset];
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx8val[vmx_offset];
return result;
@@ -1719,17 +1719,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
break;
#endif /* CONFIG_ALTIVEC */
default:
@@ -1770,21 +1770,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
break;
#endif /* CONFIG_ALTIVEC */
default:
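Every conversion in this file follows the same read-modify-write shape; the point of the accessors is that, on a nestedv2 host, they can track which registers need to be fetched from or flushed to the L0 hypervisor instead of touching vcpu->arch directly. A sketch of the pattern (illustrative only, using the same union layout as the code above):

	union {
		__vector128 vval;
		u64 vsxval[2];
	} val;

	kvmppc_get_vsx_vr(vcpu, idx, &val.vval);	/* fetch current value */
	val.vsxval[0] = gpr;				/* update one doubleword */
	kvmppc_set_vsx_vr(vcpu, idx, &val.vval);	/* write back */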
diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c
new file mode 100644
index 0000000000..4720b8dc88
--- /dev/null
+++ b/arch/powerpc/kvm/test-guest-state-buffer.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <kunit/test.h>
+
+#include <asm/guest-state-buffer.h>
+
+static void test_creating_buffer(struct kunit *test)
+{
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x100;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);
+
+ KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
+ KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_adding_element(struct kunit *test)
+{
+ const struct kvmppc_gs_elem *head, *curr;
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+ int rem;
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ int i, rc;
+ u64 data;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ /* Single elements, direct use of __kvmppc_gse_put() */
+ data = 0xdeadbeef;
+ rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ head = kvmppc_gsb_data(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);
+ data = 0;
+ memcpy(&data, kvmppc_gse_data(head), 8);
+ KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);
+
+ /* Multiple elements, simple wrapper */
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ u.dw[0] = 0x1;
+ u.dw[1] = 0x2;
+ rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
+ KUNIT_EXPECT_GE(test, rc, 0);
+ u.dw[0] = 0x0;
+ u.dw[1] = 0x0;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ switch (i) {
+ case 0:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
+ 0xdeadbeef);
+ break;
+ case 1:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(1));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
+ 0xcafef00d);
+ break;
+ case 2:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_VSRS(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
+ kvmppc_gse_get_vector128(curr, &u.v);
+ KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
+ KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
+ break;
+ }
+ }
+ KUNIT_EXPECT_EQ(test, i, 3);
+
+ kvmppc_gsb_reset(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
+ sizeof(struct kvmppc_gs_header));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_parsing(struct kunit *test)
+{
+ struct kvmppc_gs_elem *gse;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ u64 tmp1, tmp2;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ tmp1 = 0xdeadbeefull;
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);
+
+ KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);
+
+ tmp2 = kvmppc_gse_get_u64(gse);
+ KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_bitmap(struct kunit *test)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ struct kvmppc_gs_bitmap gsbm1 = { 0 };
+ struct kvmppc_gs_bitmap gsbm2 = { 0 };
+ u16 iden;
+ int i, j;
+
+ i = 0;
+ for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
+ iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ j = 0;
+ kvmppc_gsbm_for_each(&gsbm1, iden)
+ {
+ kvmppc_gsbm_set(&gsbm2, iden);
+ j++;
+ }
+ KUNIT_EXPECT_EQ(test, i, j);
+ KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
+}
+
+struct kvmppc_gs_msg_test1_data {
+ u64 a;
+ u32 b;
+ struct kvmppc_gs_part_table c;
+ struct kvmppc_gs_proc_table d;
+ struct kvmppc_gs_buff_info e;
+};
+
+static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
+{
+ size_t size = 0;
+ u16 ids[] = {
+ KVMPPC_GSID_PARTITION_TABLE,
+ KVMPPC_GSID_PROCESS_TABLE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_GPR(0),
+ KVMPPC_GSID_CR,
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int test1_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
+ kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
+ kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
+ data->c);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
+		kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE,
+ data->d);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
+ kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);
+
+ return 0;
+}
+
+static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ if (gse)
+ data->a = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
+ if (gse)
+ data->b = kvmppc_gse_get_u32(gse);
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
+ .get_size = test1_get_size,
+ .fill_info = test1_fill_info,
+ .refresh_info = test1_refresh_info,
+};
+
+static void test_gs_msg(struct kunit *test)
+{
+ struct kvmppc_gs_msg_test1_data test1_data = {
+ .a = 0xdeadbeef,
+ .b = 0x1,
+ };
+ struct kvmppc_gs_msg *gsm;
+ struct kvmppc_gs_buff *gsb;
+
+ gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);
+
+ kvmppc_gsm_fill_info(gsm, gsb);
+
+ memset(&test1_data, 0, sizeof(test1_data));
+
+ kvmppc_gsm_refresh_info(gsm, gsb);
+ KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
+ KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);
+
+ kvmppc_gsm_free(gsm);
+}
+
+static struct kunit_case guest_state_buffer_testcases[] = {
+ KUNIT_CASE(test_creating_buffer),
+ KUNIT_CASE(test_adding_element),
+ KUNIT_CASE(test_gs_bitmap),
+ KUNIT_CASE(test_gs_parsing),
+ KUNIT_CASE(test_gs_msg),
+ {}
+};
+
+static struct kunit_suite guest_state_buffer_test_suite = {
+ .name = "guest_state_buffer_test",
+ .test_cases = guest_state_buffer_testcases,
+};
+
+kunit_test_suites(&guest_state_buffer_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index b00112d7ad..c6ab46156c 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -38,6 +38,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr
return 0;
failed:
+ mb(); /* sync */
return -EPERM;
}
@@ -204,9 +205,6 @@ void __init poking_init(void)
{
int ret;
- if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
- return;
-
if (mm_patch_enabled())
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke_mm:online",
@@ -309,10 +307,6 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
err = __patch_instruction(addr, instr, patch_addr);
- /* hwsync performed by __patch_instruction (sync) if successful */
- if (err)
- mb(); /* sync */
-
/* context synchronisation performed by __patch_instruction (isync or exception) */
stop_using_temp_mm(patching_mm, orig_mm);
@@ -378,6 +372,144 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
}
NOKPROBE_SYMBOL(patch_instruction);
+static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
+{
+ unsigned long start = (unsigned long)patch_addr;
+
+ /* Repeat instruction */
+ if (repeat_instr) {
+ ppc_inst_t instr = ppc_inst_read(code);
+
+ if (ppc_inst_prefixed(instr)) {
+ u64 val = ppc_inst_as_ulong(instr);
+
+ memset64((u64 *)patch_addr, val, len / 8);
+ } else {
+ u32 val = ppc_inst_val(instr);
+
+ memset32(patch_addr, val, len / 4);
+ }
+ } else {
+ memcpy(patch_addr, code, len);
+ }
+
+ smp_wmb(); /* smp write barrier */
+ flush_icache_range(start, start + len);
+ return 0;
+}
+
+/*
+ * Map a page and patch the instructions that fit within it.
+ * Assumes 'len' is at most (PAGE_SIZE - offset_in_page(addr)).
+ */
+static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ struct mm_struct *patching_mm, *orig_mm;
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ spinlock_t *ptl;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ patching_mm = __this_cpu_read(cpu_patching_context.mm);
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+
+ __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+
+ /* order PTE update before use, also serves as the hwsync */
+ asm volatile("ptesync" ::: "memory");
+
+ /* order context switch after arbitrary prior code */
+ isync();
+
+ orig_mm = start_using_temp_mm(patching_mm);
+
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+ /* context synchronisation performed by __patch_instructions */
+ stop_using_temp_mm(patching_mm, orig_mm);
+
+ pte_clear(patching_mm, text_poke_addr, pte);
+ /*
+ * ptesync to order PTE update before TLB invalidation done
+ * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
+ */
+ local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+
+ pte_unmap_unlock(pte, ptl);
+
+ return err;
+}
+
+/*
+ * Map a page and patch the instructions that fit within it.
+ * Assumes 'len' is at most (PAGE_SIZE - offset_in_page(addr)).
+ */
+static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = __this_cpu_read(cpu_patching_context.pte);
+ __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync" ::: "memory");
+
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+ pte_clear(&init_mm, text_poke_addr, pte);
+ flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+ return err;
+}
+
+/*
+ * Patch 'addr' with 'len' bytes of instructions from 'code'.
+ *
+ * If repeat_instr is true, the single instruction at 'code' is
+ * repeated to fill 'len' bytes.
+ */
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ while (len > 0) {
+ unsigned long flags;
+ size_t plen;
+ int err;
+
+ plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
+
+ local_irq_save(flags);
+ if (mm_patch_enabled())
+ err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
+ else
+ err = __do_patch_instructions(addr, code, plen, repeat_instr);
+ local_irq_restore(flags);
+ if (err)
+ return err;
+
+ len -= plen;
+ addr = (u32 *)((unsigned long)addr + plen);
+ if (!repeat_instr)
+ code = (u32 *)((unsigned long)code + plen);
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(patch_instructions);
+
int patch_branch(u32 *addr, unsigned long target, int flags)
{
ppc_inst_t instr;
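Like patch_instruction(), the new patch_instructions() leaves serialisation to its callers; the BPF hooks added later in this patch take text_mutex around it. A hypothetical caller filling a region with trap instructions:

	u32 insn = PPC_RAW_TRAP();
	int err;

	mutex_lock(&text_mutex);
	err = patch_instructions(dst, &insn, len, true);	/* repeat one insn over len bytes */
	mutex_unlock(&text_mutex);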
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6dd2f46bd3..5de4dd549f 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -16,7 +16,8 @@ struct qnode {
struct qnode *next;
struct qspinlock *lock;
int cpu;
- int yield_cpu;
+ u8 sleepy; /* 1 if the previous vCPU was preempted or
+ * if the previous node was sleepy */
u8 locked; /* 1 if lock acquired */
};
@@ -43,7 +44,7 @@ static bool pv_sleepy_lock_sticky __read_mostly = false;
static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
static int pv_sleepy_lock_factor __read_mostly = 256;
static bool pv_yield_prev __read_mostly = true;
-static bool pv_yield_propagate_owner __read_mostly = true;
+static bool pv_yield_sleepy_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -247,22 +248,18 @@ static __always_inline void seen_sleepy_lock(void)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
}
-static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val)
+static __always_inline void seen_sleepy_node(void)
{
if (pv_sleepy_lock) {
if (pv_sleepy_lock_interval_ns)
this_cpu_write(sleepy_lock_seen_clock, sched_clock());
- if (val & _Q_LOCKED_VAL) {
- if (!(val & _Q_SLEEPY_VAL))
- try_set_sleepy(lock, val);
- }
+ /* Don't set sleepy because we likely have a stale val */
}
}
-static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
+static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
{
- int cpu = decode_tail_cpu(val);
- struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu);
+ struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu);
int idx;
/*
@@ -353,77 +350,66 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
return __yield_to_locked_owner(lock, val, paravirt, mustq);
}
-static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
{
struct qnode *next;
int owner;
if (!paravirt)
return;
- if (!pv_yield_propagate_owner)
- return;
-
- owner = get_owner_cpu(val);
- if (*set_yield_cpu == owner)
+ if (!pv_yield_sleepy_owner)
return;
next = READ_ONCE(node->next);
if (!next)
return;
- if (vcpu_is_preempted(owner)) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- } else if (*set_yield_cpu != -1) {
- next->yield_cpu = owner;
- *set_yield_cpu = owner;
- }
+ if (next->sleepy)
+ return;
+
+ owner = get_owner_cpu(val);
+ if (vcpu_is_preempted(owner))
+ next->sleepy = 1;
}
/* Called inside spin_begin() */
-static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
+static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
{
- int prev_cpu = decode_tail_cpu(val);
u32 yield_count;
- int yield_cpu;
bool preempted = false;
if (!paravirt)
goto relax;
- if (!pv_yield_propagate_owner)
- goto yield_prev;
-
- yield_cpu = READ_ONCE(node->yield_cpu);
- if (yield_cpu == -1) {
- /* Propagate back the -1 CPU */
- if (node->next && node->next->yield_cpu != -1)
- node->next->yield_cpu = yield_cpu;
+ if (!pv_yield_sleepy_owner)
goto yield_prev;
- }
-
- yield_count = yield_count_of(yield_cpu);
- if ((yield_count & 1) == 0)
- goto yield_prev; /* owner vcpu is running */
-
- if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
- goto yield_prev; /* re-sample lock owner */
- spin_end();
-
- preempted = true;
- seen_sleepy_node(lock, val);
+ /*
+ * If the previous waiter was preempted it might not be able to
+ * propagate sleepy to us, so check the lock in that case too.
+ */
+ if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
+ u32 val = READ_ONCE(lock->val);
- smp_rmb();
+ if (val & _Q_LOCKED_VAL) {
+ if (node->next && !node->next->sleepy) {
+ /*
+				 * Propagate sleepy to the next waiter, but
+				 * only if the owner is preempted; this allows
+				 * the queue to become "non-sleepy" if vCPU
+				 * preemption ceases to occur, even if the
+				 * lock remains highly contended.
+ */
+ if (vcpu_is_preempted(get_owner_cpu(val)))
+ node->next->sleepy = 1;
+ }
- if (yield_cpu == node->yield_cpu) {
- if (node->next && node->next->yield_cpu != yield_cpu)
- node->next->yield_cpu = yield_cpu;
- yield_to_preempted(yield_cpu, yield_count);
- spin_begin();
- return preempted;
+ preempted = yield_to_locked_owner(lock, val, paravirt);
+ if (preempted)
+ return preempted;
+ }
+ node->sleepy = false;
}
- spin_begin();
yield_prev:
if (!pv_yield_prev)
@@ -436,7 +422,7 @@ yield_prev:
spin_end();
preempted = true;
- seen_sleepy_node(lock, val);
+ seen_sleepy_node();
smp_rmb(); /* See __yield_to_locked_owner comment */
@@ -546,7 +532,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
bool sleepy = false;
bool mustq = false;
int idx;
- int set_yield_cpu = -1;
int iters = 0;
BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -570,7 +555,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
node->next = NULL;
node->lock = lock;
node->cpu = smp_processor_id();
- node->yield_cpu = -1;
+ node->sleepy = 0;
node->locked = 0;
tail = encode_tail_cpu(node->cpu);
@@ -587,7 +572,8 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
* head of the waitqueue.
*/
if (old & _Q_TAIL_CPU_MASK) {
- struct qnode *prev = get_tail_qnode(lock, old);
+ int prev_cpu = decode_tail_cpu(old);
+ struct qnode *prev = get_tail_qnode(lock, prev_cpu);
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
@@ -597,16 +583,12 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
while (!READ_ONCE(node->locked)) {
spec_barrier();
- if (yield_to_prev(lock, node, old, paravirt))
+ if (yield_to_prev(lock, node, prev_cpu, paravirt))
seen_preempted = true;
}
spec_barrier();
spin_end();
- /* Clear out stale propagated yield_cpu */
- if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
- node->yield_cpu = -1;
-
smp_rmb(); /* acquire barrier for the mcs lock */
/*
@@ -648,7 +630,7 @@ again:
}
}
- propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
+ propagate_sleepy(node, val, paravirt);
preempted = yield_head_to_locked_owner(lock, val, paravirt);
if (!maybe_stealers)
continue;
@@ -952,21 +934,21 @@ static int pv_yield_prev_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");
-static int pv_yield_propagate_owner_set(void *data, u64 val)
+static int pv_yield_sleepy_owner_set(void *data, u64 val)
{
- pv_yield_propagate_owner = !!val;
+ pv_yield_sleepy_owner = !!val;
return 0;
}
-static int pv_yield_propagate_owner_get(void *data, u64 *val)
+static int pv_yield_sleepy_owner_get(void *data, u64 *val)
{
- *val = pv_yield_propagate_owner;
+ *val = pv_yield_sleepy_owner;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_sleepy_owner, pv_yield_sleepy_owner_get, pv_yield_sleepy_owner_set, "%llu\n");
static int pv_prod_head_set(void *data, u64 val)
{
@@ -998,7 +980,7 @@ static __init int spinlock_debugfs_init(void)
debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
- debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
+ debugfs_create_file("qspl_pv_yield_sleepy_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_sleepy_owner);
debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
}
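The renamed control keeps the same debugfs semantics as its predecessor, so the sleepy-owner behaviour can still be toggled at runtime, e.g. echo 0 > /sys/kernel/debug/powerpc/qspl_pv_yield_sleepy_owner (path assumes arch_debugfs_dir is mounted at the usual location).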
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index a4ab862506..6af97dc0f6 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -586,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -636,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -680,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u = {};
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -707,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u;
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 8b804e1a9f..4ed0efd03d 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -36,8 +36,9 @@
/*
* Load a PTE into the hash table, if possible.
- * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x400) if a write.
+ * The address is in r4, and r3 contains required access flags:
+ * - For ISI: _PAGE_PRESENT | _PAGE_EXEC
+ * - For DSI: _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE if a write.
* r9 contains the SRR1 value, from which we use the MSR_PR bit.
* SPRG_THREAD contains the physical address of the current task's thread.
*
@@ -67,12 +68,16 @@ _GLOBAL(hash_page)
lis r0, TASK_SIZE@h /* check if kernel address */
cmplw 0,r4,r0
mfspr r8,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
- ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
lis r5,swapper_pg_dir@ha /* if kernel address, use */
+ andi. r0,r9,MSR_PR /* Check usermode */
addi r5,r5,swapper_pg_dir@l /* kernel page table */
- rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
+#ifdef CONFIG_SMP
+ bne- .Lhash_page_out /* return if usermode */
+#else
+ bnelr-
+#endif
112: tophys(r5, r5)
#ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
@@ -113,15 +118,15 @@ _GLOBAL(hash_page)
lwarx r6,0,r8 /* get linux-style pte, flag word */
#ifdef CONFIG_PPC_KUAP
mfsrin r5,r4
- rlwinm r0,r9,28,_PAGE_RW /* MSR[PR] => _PAGE_RW */
- rlwinm r5,r5,12,_PAGE_RW /* Ks => _PAGE_RW */
+ rlwinm r0,r9,28,_PAGE_WRITE /* MSR[PR] => _PAGE_WRITE */
+ rlwinm r5,r5,12,_PAGE_WRITE /* Ks => _PAGE_WRITE */
andc r5,r5,r0 /* Ks & ~MSR[PR] */
- andc r5,r6,r5 /* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
+ andc r5,r6,r5 /* Clear _PAGE_WRITE when Ks = 1 && MSR[PR] = 0 */
andc. r5,r3,r5 /* check access & ~permission */
#else
andc. r5,r3,r6 /* check access & ~permission */
#endif
- rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ rlwinm r0,r3,32-3,24,24 /* _PAGE_WRITE access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
#ifdef CONFIG_SMP
bne- .Lhash_page_out /* return if access not permitted */
@@ -307,12 +312,15 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
__REF
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
- rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
+ lis r0, TASK_SIZE@h
+ rlwinm r5,r5,0,~3 /* Clear PP bits */
+ cmplw r4,r0
+ rlwinm r8,r5,32-9,30,30 /* _PAGE_WRITE -> PP msb */
rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */
and r8,r8,r0 /* writable if _RW & _DIRTY */
- rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
- rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
- ori r8,r8,0xe04 /* clear out reserved bits */
+ bge- 1f /* Kernelspace ? Skip */
+ ori r5,r5,3 /* Userspace ? PP = 3 */
+1: ori r8,r8,0xe04 /* clear out reserved bits */
andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 850783cfa9..5445587bfe 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -127,7 +127,7 @@ static void setibat(int index, unsigned long virt, phys_addr_t phys,
wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
- if (flags & _PAGE_USER)
+ if (!is_kernel_addr(virt))
bat[0].batu |= 1; /* Vp = 1 */
}
@@ -277,10 +277,10 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
/* Do DBAT first */
wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
| _PAGE_COHERENT | _PAGE_GUARDED);
- wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+ wimgxpp |= (flags & _PAGE_WRITE) ? BPP_RW : BPP_RX;
bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
- if (flags & _PAGE_USER)
+ if (!is_kernel_addr(virt))
bat[1].batu |= 1; /* Vp = 1 */
if (flags & _PAGE_GUARDED) {
/* G bit must be zero in IBATs */
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 8f8a62d3ff..3438ab72c3 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -542,6 +542,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
set_pte_at(vma->vm_mm, addr, ptep, pte);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* For hash translation mode, we use the deposited table to store hash slot
* information and they are stored at PTRS_PER_PMD offset from related pmd
@@ -563,6 +564,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true;
}
+#endif
/*
* Does the CPU support tlbie?
@@ -635,12 +637,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
unsigned long prot;
/* Radix supports execute-only, but protection_map maps X -> RX */
- if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
- prot = pgprot_val(PAGE_EXECONLY);
- } else {
- prot = pgprot_val(protection_map[vm_flags &
- (VM_ACCESS_FLAGS | VM_SHARED)]);
- }
+ if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
+ vm_flags |= VM_READ;
+
+ prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);
if (vm_flags & VM_SAO)
prot |= _PAGE_SAO;
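Connecting the two halves of this change: on radix a PROT_EXEC-only mapping keeps vm_flags == VM_EXEC, which the protection_map[] update below (mm/pgtable.c) now resolves to PAGE_EXECONLY_X; on hash, which cannot enforce execute-only, the same request is widened to VM_EXEC | VM_READ and resolves to PAGE_READONLY_X, matching the old behaviour.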
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 2369d1bf24..fde7790277 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -67,7 +67,7 @@ static int drmem_update_dt_v1(struct device_node *memory,
struct property *new_prop;
struct of_drconf_cell_v1 *dr_cell;
struct drmem_lmb *lmb;
- u32 *p;
+ __be32 *p;
new_prop = clone_property(prop, prop->length);
if (!new_prop)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b1723094d4..53335ae21a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -266,14 +266,15 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
}
/*
- * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
- * defined in protection_map[]. Read faults can only be caused by
- * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
+ * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as
+ * defined in protection_map[]. In that case read faults can only be
+ * caused by a PROT_NONE mapping. However, a non-exec access on a
+ * VM_EXEC-only mapping is invalid anyway, so report it as such.
*/
if (unlikely(!vma_is_accessible(vma)))
return true;
- if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
+ if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
return true;
/*
@@ -496,6 +497,8 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 119ef491f7..d3a7726ecf 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -126,7 +126,7 @@ void pgtable_cache_add(unsigned int shift)
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
- struct kmem_cache *new;
+ struct kmem_cache *new = NULL;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
@@ -139,7 +139,8 @@ void pgtable_cache_add(unsigned int shift)
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
+ if (name)
+ new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
if (!new)
panic("Could not allocate pgtable cache for order %d", shift);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d8adc452f4..4e71dfe7d0 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -39,6 +39,7 @@
#include <asm/hugetlb.h>
#include <asm/kup.h>
#include <asm/kasan.h>
+#include <asm/fixmap.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 705e8e8ffd..7b0afcabd8 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -50,10 +50,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
if (pte_write(pte))
pte = pte_mkdirty(pte);
- /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
- pte = pte_exprotect(pte);
- pte = pte_mkprivileged(pte);
-
if (iowa_is_active())
return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
@@ -66,7 +62,7 @@ int early_ioremap_range(unsigned long ea, phys_addr_t pa,
unsigned long i;
for (i = 0; i < size; i += PAGE_SIZE) {
- int err = map_kernel_page(ea + i, pa + i, prot);
+ int err = map_kernel_page(ea + i, pa + i, pgprot_nx(prot));
if (WARN_ON_ONCE(err)) /* Should clean up */
return err;
diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
index a70828a6d9..aa9aa11927 100644
--- a/arch/powerpc/mm/kasan/init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
@@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
if (ret)
return ret;
+ k_start = k_start & PAGE_MASK;
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
if (!block)
return -ENOMEM;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 07e8f4f1e0..3a440004b9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -26,6 +26,7 @@
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
+#include <asm/fixmap.h>
#include <mm/mmu_decl.h>
@@ -34,18 +35,18 @@ unsigned long long memory_limit;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot)
+pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot)
{
if (ppc_md.phys_mem_access_prot)
- return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
+ return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);
if (!page_is_ram(pfn))
vma_prot = pgprot_noncached(vma_prot);
return vma_prot;
}
-EXPORT_SYMBOL(phys_mem_access_prot);
+EXPORT_SYMBOL(__phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);
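The ppc_md.phys_mem_access_prot hook drops its unused struct file * argument to match. A hypothetical platform implementation now has this shape, mirroring the generic fallback above:

	static pgprot_t foo_phys_mem_access_prot(unsigned long pfn,
						 unsigned long size,
						 pgprot_t vma_prot)
	{
		if (!page_is_ram(pfn))
			vma_prot = pgprot_noncached(vma_prot);
		return vma_prot;
	}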
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 7f9ff06401..72341b9fb5 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -181,3 +181,8 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
{
return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+#endif
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 3684d6e570..e835e80c09 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -48,20 +48,25 @@
*/
void __init MMU_init_hw(void)
{
+ int i;
+ unsigned long zpr;
+
/*
* The Zone Protection Register (ZPR) defines how protection will
- * be applied to every page which is a member of a given zone. At
- * present, we utilize only two of the 4xx's zones.
+ * be applied to every page which is a member of a given zone.
* The zone index bits (of ZSEL) in the PTE are used for software
- * indicators, except the LSB. For user access, zone 1 is used,
- * for kernel access, zone 0 is used. We set all but zone 1
- * to zero, allowing only kernel access as indicated in the PTE.
- * For zone 1, we set a 01 binary (a value of 10 will not work)
+ * indicators. We use the 4 upper bits of virtual address to select
+ * the zone. We set all zones above TASK_SIZE to zero, allowing
+ * only kernel access as indicated in the PTE. For zones below
+ * TASK_SIZE, we set a 01 binary (a value of 10 will not work)
* to allow user access as indicated in the PTE. This also allows
* kernel access as indicated in the PTE.
*/
- mtspr(SPRN_ZPR, 0x10000000);
+ for (i = 0, zpr = 0; i < TASK_SIZE >> 28; i++)
+ zpr |= 1 << (30 - i * 2);
+
+ mtspr(SPRN_ZPR, zpr);
flush_instruction_cache();
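Worked example of the new loop (assuming the common TASK_SIZE of 0xc0000000): TASK_SIZE >> 28 == 12, so zones 0..11 each receive the 0b01 encoding and zones 12..15 stay 0b00:

	zpr = 0x55555500;	/* user access permitted below TASK_SIZE only */

replacing the old hard-coded 0x10000000, which opened only zone 1.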
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index a642a79298..6be6421086 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -10,6 +10,8 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
+#include <asm/fixmap.h>
+
#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index b80fc4a91a..1c5e4ecbeb 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -71,7 +71,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot)
{
pgd_t *pgdp;
p4d_t *p4dp;
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
index 40a4e69ae1..921c3521ec 100644
--- a/arch/powerpc/mm/nohash/e500.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -117,15 +117,15 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_SW : 0;
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32;
/* Below is unlikely -- only for large user pages or similar */
- if (pte_user(__pte(flags))) {
+ if (!is_kernel_addr(virt)) {
TLBCAM[index].MAS3 |= MAS3_UR;
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
- TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_WRITE) ? MAS3_UW : 0;
} else {
TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
}
diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
index 6b30e40d45..a134d28a0e 100644
--- a/arch/powerpc/mm/nohash/e500_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
@@ -178,8 +178,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
*
* This must always be called with the pte lock held.
*/
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, unsigned int nr)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
if (is_vm_hugetlb_page(vma))
book3e_hugetlb_preload(vma, address, *ptep);
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 2fb3edafe9..b4f2786a7d 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -178,7 +178,7 @@ static void __init get_crash_kernel(void *fdt, unsigned long size)
int ret;
ret = parse_crashkernel(boot_command_line, size, &crash_size,
- &crash_base);
+ &crash_base, NULL, NULL);
if (ret != 0 || crash_size == 0)
return;
if (crash_base == 0)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 4d69bfb9bc..a04ae4449a 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -46,13 +46,13 @@ static inline int is_exec_fault(void)
* and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
* on userspace PTEs
*/
-static inline int pte_looks_normal(pte_t pte)
+static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
if (pte_present(pte) && !pte_special(pte)) {
if (pte_ci(pte))
return 0;
- if (pte_user(pte))
+ if (!is_kernel_addr(addr))
return 1;
}
return 0;
@@ -79,11 +79,11 @@ static struct folio *maybe_pte_to_folio(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter_hash(pte_t pte)
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
- if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
- cpu_has_feature(CPU_FTR_NOEXECUTE))) {
+ if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
+ cpu_has_feature(CPU_FTR_NOEXECUTE))) {
struct folio *folio = maybe_pte_to_folio(pte);
if (!folio)
return pte;
@@ -97,7 +97,7 @@ static pte_t set_pte_filter_hash(pte_t pte)
#else /* CONFIG_PPC_BOOK3S */
-static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
+static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }
#endif /* CONFIG_PPC_BOOK3S */
@@ -107,7 +107,7 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
*
* This is also called once for the folio. So only work with folio->flags here.
*/
-static inline pte_t set_pte_filter(pte_t pte)
+static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
struct folio *folio;
@@ -115,10 +115,10 @@ static inline pte_t set_pte_filter(pte_t pte)
return pte;
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
- return set_pte_filter_hash(pte);
+ return set_pte_filter_hash(pte, addr);
/* No exec permission in the first place, move on */
- if (!pte_exec(pte) || !pte_looks_normal(pte))
+ if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
return pte;
/* If you set _PAGE_EXEC on weird pages you're on your own */
@@ -198,7 +198,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
* is called. Filter the pte value and use the filtered value
* to setup all the ptes in the range.
*/
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
/*
* We don't need to call arch_enter/leave_lazy_mmu_mode()
@@ -314,7 +314,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
*/
VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
- pte = set_pte_filter(pte);
+ pte = set_pte_filter(pte, addr);
val = pte_val(pte);
@@ -505,7 +505,7 @@ const pgprot_t protection_map[16] = {
[VM_READ] = PAGE_READONLY,
[VM_WRITE] = PAGE_COPY,
[VM_WRITE | VM_READ] = PAGE_COPY,
- [VM_EXEC] = PAGE_READONLY_X,
+ [VM_EXEC] = PAGE_EXECONLY_X,
[VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_EXEC | VM_WRITE] = PAGE_COPY_X,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
@@ -513,7 +513,7 @@ const pgprot_t protection_map[16] = {
[VM_SHARED | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
- [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECONLY_X,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index fac932eb8f..b5c79b11ea 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -21,11 +21,6 @@ static const struct flag_info flag_array[] = {
.set = "huge",
.clear = " ",
}, {
- .mask = _PAGE_SH,
- .val = 0,
- .set = "user",
- .clear = " ",
- }, {
.mask = _PAGE_RO | _PAGE_NA,
.val = 0,
.set = "rw",
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index f884760ca5..39c30c62b7 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -11,15 +11,15 @@
static const struct flag_info flag_array[] = {
{
- .mask = _PAGE_USER,
- .val = _PAGE_USER,
- .set = "user",
- .clear = " ",
+ .mask = _PAGE_READ,
+ .val = 0,
+ .set = " ",
+ .clear = "r",
}, {
- .mask = _PAGE_RW,
+ .mask = _PAGE_WRITE,
.val = 0,
- .set = "r ",
- .clear = "rw",
+ .set = " ",
+ .clear = "w",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 72b7bb34fa..cdea5dccae 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -36,9 +36,6 @@
EMIT(PPC_RAW_BRANCH(offset)); \
} while (0)
-/* bl (unconditional 'branch' with link) */
-#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
-
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) \
do { \
@@ -147,12 +144,6 @@ struct codegen_context {
#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
#endif
-static inline void bpf_flush_icache(void *start, void *end)
-{
- smp_wmb(); /* smp write barrier */
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
return ctx->seen & (1 << (31 - i));
@@ -169,16 +160,17 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
}
void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func);
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
- int insn_idx, int jmp_off, int dst_reg);
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+ struct codegen_context *ctx, int insn_idx,
+ int jmp_off, int dst_reg);
#endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 37043dfc1a..0f9a217833 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -13,9 +13,13 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include <asm/kprobes.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
#include <linux/bpf.h>
+#include <asm/kprobes.h>
+#include <asm/code-patching.h>
+
#include "bpf_jit.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
@@ -39,10 +43,13 @@ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg,
return 0;
}
-struct powerpc64_jit_data {
- struct bpf_binary_header *header;
+struct powerpc_jit_data {
+ /* address of rw header */
+ struct bpf_binary_header *hdr;
+ /* address of ro final header */
+ struct bpf_binary_header *fhdr;
u32 *addrs;
- u8 *image;
+ u8 *fimage;
u32 proglen;
struct codegen_context ctx;
};
@@ -59,15 +66,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
u8 *image = NULL;
u32 *code_base;
u32 *addrs;
- struct powerpc64_jit_data *jit_data;
+ struct powerpc_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
int flen;
- struct bpf_binary_header *bpf_hdr;
+ struct bpf_binary_header *fhdr = NULL;
+ struct bpf_binary_header *hdr = NULL;
struct bpf_prog *org_fp = fp;
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
bool extra_pass = false;
+ u8 *fimage = NULL;
+ u32 *fcode_base;
u32 extable_len;
u32 fixup_len;
@@ -97,9 +107,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
addrs = jit_data->addrs;
if (addrs) {
cgctx = jit_data->ctx;
- image = jit_data->image;
- bpf_hdr = jit_data->header;
+ /*
+		 * The program is JIT compiled to a writable location
+		 * (image/code_base) first, then moved to its read-only final
+		 * location (fimage/fcode_base) using instruction patching.
+ */
+ fimage = jit_data->fimage;
+ fhdr = jit_data->fhdr;
proglen = jit_data->proglen;
+ hdr = jit_data->hdr;
+ image = (void *)hdr + ((void *)fimage - (void *)fhdr);
extra_pass = true;
/* During extra pass, ensure index is reset before repopulating extable entries */
cgctx.exentry_idx = 0;
@@ -119,7 +136,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+ if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
/* We hit something illegal or unsupported. */
fp = org_fp;
goto out_addrs;
@@ -134,7 +151,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
*/
if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
+ if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
fp = org_fp;
goto out_addrs;
}
@@ -146,9 +163,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
* update ctgtx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
*/
- bpf_jit_build_prologue(0, &cgctx);
+ bpf_jit_build_prologue(NULL, &cgctx);
addrs[fp->len] = cgctx.idx * 4;
- bpf_jit_build_epilogue(0, &cgctx);
+ bpf_jit_build_epilogue(NULL, &cgctx);
fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
@@ -156,17 +173,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
proglen = cgctx.idx * 4;
alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
- bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
- if (!bpf_hdr) {
+ fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
+ bpf_jit_fill_ill_insns);
+ if (!fhdr) {
fp = org_fp;
goto out_addrs;
}
if (extable_len)
- fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
+ fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+ fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
@@ -174,8 +193,10 @@ skip_init_ctx:
cgctx.idx = 0;
cgctx.alt_exit_addr = 0;
bpf_jit_build_prologue(code_base, &cgctx);
- if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
- bpf_jit_binary_free(bpf_hdr);
+ if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
+ extra_pass)) {
+ bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
+ bpf_jit_binary_pack_free(fhdr, hdr);
fp = org_fp;
goto out_addrs;
}
@@ -195,17 +216,19 @@ skip_init_ctx:
#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
- ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[0] = (u64)fcode_base;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
- fp->bpf_func = (void *)image;
+ fp->bpf_func = (void *)fimage;
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
- bpf_jit_binary_lock_ro(bpf_hdr);
+ if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
+ fp = org_fp;
+ goto out_addrs;
+ }
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
@@ -215,8 +238,9 @@ out_addrs:
jit_data->addrs = addrs;
jit_data->ctx = cgctx;
jit_data->proglen = proglen;
- jit_data->image = image;
- jit_data->header = bpf_hdr;
+ jit_data->fimage = fimage;
+ jit_data->fhdr = fhdr;
+ jit_data->hdr = hdr;
}
out:
@@ -230,12 +254,13 @@ out:
* The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
* this function, as this only applies to BPF_PROBE_MEM, for now.
*/
-int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
- int insn_idx, int jmp_off, int dst_reg)
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
+ struct codegen_context *ctx, int insn_idx, int jmp_off,
+ int dst_reg)
{
off_t offset;
unsigned long pc;
- struct exception_table_entry *ex;
+ struct exception_table_entry *ex, *ex_entry;
u32 *fixup;
/* Populate extable entries only in the last pass */
@@ -246,9 +271,16 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
return -EINVAL;
+ /*
+ * The program is first written to image and only then copied to its
+ * final location (fimage), so update the entry in image first. Since
+ * all offsets used are relative, the entry can be copied to the final
+ * location as-is.
+ */
pc = (unsigned long)&image[insn_idx];
+ ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
- fixup = (void *)fp->aux->extable -
+ fixup = (void *)ex -
(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
@@ -259,18 +291,71 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
fixup[BPF_FIXUP_LEN - 1] =
PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
- ex = &fp->aux->extable[ctx->exentry_idx];
+ ex_entry = &ex[ctx->exentry_idx];
- offset = pc - (long)&ex->insn;
+ offset = pc - (long)&ex_entry->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
+ ex_entry->insn = offset;
- offset = (long)fixup - (long)&ex->fixup;
+ offset = (long)fixup - (long)&ex_entry->fixup;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
- ex->fixup = offset;
+ ex_entry->fixup = offset;
ctx->exentry_idx++;
return 0;
}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ int err;
+
+ if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&text_mutex);
+ err = patch_instructions(dst, src, len, false);
+ mutex_unlock(&text_mutex);
+
+ return err ? ERR_PTR(err) : dst;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ u32 insn = BREAKPOINT_INSTRUCTION;
+ int ret;
+
+ if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
+ return -EINVAL;
+
+ mutex_lock(&text_mutex);
+ ret = patch_instructions(dst, &insn, len, true);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+ if (fp->jited) {
+ struct powerpc_jit_data *jit_data = fp->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
+ kvfree(jit_data->addrs);
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(fp);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+ }
+
+ bpf_prog_unlock_free(fp);
+}
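
Taken together, the bpf_jit_comp.c changes above move the powerpc JIT from
bpf_jit_binary_alloc() to the packing allocator: instructions are emitted into
a writable scratch image and only copied into the read-only final image
(fimage) when the program is finalized. A minimal sketch of that lifecycle,
reusing the names from the diff (error paths abbreviated):

	struct bpf_binary_header *fhdr, *hdr;	/* ro (final) and rw (scratch) headers */
	u8 *fimage, *image;			/* ro and rw instruction buffers */

	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
					 bpf_jit_fill_ill_insns);

	/* ... emit into image, computing run-time addresses against fimage ... */

	/* Success: copy the rw image into the ro region and lock it down. */
	bpf_jit_binary_pack_finalize(fp, fhdr, hdr);

	/* Failure: sync the size into the ro header via patch_instructions()
	 * so bpf_jit_binary_pack_free() can return it to the pack allocator.
	 */
	bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
	bpf_jit_binary_pack_free(fhdr, hdr);
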
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 7f91ea064c..2f39c50ca7 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -200,12 +200,13 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+/* Relative offset needs to be calculated based on final image location */
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
- s32 rel = (s32)func - (s32)(image + ctx->idx);
+ s32 rel = (s32)func - (s32)(fimage + ctx->idx);
if (image && rel < 0x2000000 && rel >= -0x2000000) {
- PPC_BL(func);
+ EMIT(PPC_RAW_BL(rel));
} else {
/* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@@ -278,7 +279,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
}
/* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
const struct bpf_insn *insn = fp->insnsi;
@@ -940,7 +941,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* !fp->aux->verifier_zext. Emit NOP otherwise.
*
* Note that "li reg_h,0" is emitted for BPF_B/H/W case,
- * if necessary. So, jump there insted of emitting an
+ * if necessary. So, jump there instead of emitting an
* additional "li reg_h,0" instruction.
*/
if (size == BPF_DW && !fp->aux->verifier_zext)
@@ -997,7 +998,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
jmp_off += 4;
}
- ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
+ ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
jmp_off, dst_reg);
if (ret)
return ret;
@@ -1053,7 +1054,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
}
- ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;
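
The extra fimage parameter matters because a relative branch encodes the
distance from the address the instruction will run at, which lies in the
final image rather than the scratch buffer. A rough sketch of the resulting
range check, with the signed 26-bit (+/- 32 MB) bl displacement spelled out:

	s32 rel = (s32)func - (s32)(fimage + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		EMIT(PPC_RAW_BL(rel));	/* target reachable: direct call */
	} else {
		/* too far: load func into a register and branch via ctr */
	}
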
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0f8048f6da..79f23974a3 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -240,7 +240,7 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
return 0;
}
-int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
@@ -361,7 +361,7 @@ asm (
);
/* Assemble the body code between the prologue & epilogue */
-int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
@@ -940,8 +940,8 @@ emit_clear:
addrs[++i] = ctx->idx * 4;
if (BPF_MODE(code) == BPF_PROBE_MEM) {
- ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
- 4, dst_reg);
+ ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+ ctx->idx - 1, 4, dst_reg);
if (ret)
return ret;
}
@@ -995,7 +995,7 @@ emit_clear:
if (func_addr_fixed)
ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
- ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 3449be7c0d..057ec2e345 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1338,7 +1338,7 @@ static int get_count_from_result(struct perf_event *event,
for (i = count = 0, element_data = res->elements + data_offset;
i < num_elements;
i++, element_data += data_size + data_offset)
- count += be64_to_cpu(*((u64 *) element_data));
+ count += be64_to_cpu(*((__be64 *)element_data));
*countp = count;
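
This hv-24x7 change (like the imc-pmu one below) is a sparse endianness fix:
be64_to_cpu() takes a __be64, and casting the buffer to plain u64 * hid the
byte order from the checker. A minimal sketch of the annotated pattern:

	static u64 read_be_counter(const void *buf)
	{
		/* The hypervisor fills this buffer big-endian; saying so
		 * in the type lets sparse flag a missing or doubled swap.
		 */
		const __be64 *counter = buf;

		return be64_to_cpu(*counter);
	}
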
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 56d82f7f97..8664a7d297 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -550,7 +550,7 @@ static int nest_imc_event_init(struct perf_event *event)
break;
}
pcni++;
- } while (pcni->vbase != 0);
+ } while (pcni->vbase);
if (!flag)
return -ENODEV;
@@ -1031,16 +1031,16 @@ static bool is_thread_imc_pmu(struct perf_event *event)
return false;
}
-static u64 * get_event_base_addr(struct perf_event *event)
+static __be64 *get_event_base_addr(struct perf_event *event)
{
u64 addr;
if (is_thread_imc_pmu(event)) {
addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
- return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
+ return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
}
- return (u64 *)event->hw.event_base;
+ return (__be64 *)event->hw.event_base;
}
static void thread_imc_pmu_start_txn(struct pmu *pmu,
@@ -1064,7 +1064,8 @@ static int thread_imc_pmu_commit_txn(struct pmu *pmu)
static u64 imc_read_counter(struct perf_event *event)
{
- u64 *addr, data;
+ __be64 *addr;
+ u64 data;
/*
* In-Memory Collection (IMC) counters are free flowing counters.
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 5729b6e059..9f720b522e 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -335,26 +335,38 @@ static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */
};
-/*
- * This could be made more efficient with a binary search on
- * a presorted list, if necessary
- */
static int find_alternatives_list(u64 event)
{
- int i, j;
- unsigned int alt;
-
- for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
- if (event < event_alternatives[i][0])
- return -1;
- for (j = 0; j < MAX_ALT; ++j) {
- alt = event_alternatives[i][j];
- if (!alt || event < alt)
- break;
- if (event == alt)
- return i;
- }
+ const unsigned int presorted_event_table[] = {
+ 0x0130e8, 0x080080, 0x080088, 0x10000a, 0x10000b, 0x10000d, 0x10000e,
+ 0x100010, 0x10001a, 0x100026, 0x100054, 0x100056, 0x1000f0, 0x1000f8,
+ 0x1000fc, 0x200008, 0x20000e, 0x200010, 0x200012, 0x200054, 0x2000f0,
+ 0x2000f2, 0x2000f4, 0x2000f5, 0x2000f6, 0x2000f8, 0x2000fc, 0x2000fe,
+ 0x2d0030, 0x30000a, 0x30000c, 0x300010, 0x300012, 0x30001a, 0x300056,
+ 0x3000f0, 0x3000f2, 0x3000f6, 0x3000f8, 0x3000fc, 0x3000fe, 0x400006,
+ 0x400007, 0x40000a, 0x40000e, 0x400010, 0x400018, 0x400056, 0x4000f0,
+ 0x4000f8, 0x600005
+ };
+ const unsigned int event_index_table[] = {
+ 0, 1, 2, 3, 4, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 12, 14,
+ 7, 15, 2, 9, 16, 3, 4, 0, 17, 10, 18, 19, 20, 1, 17, 15, 19,
+ 18, 2, 16, 21, 8, 0, 22, 13, 14, 11, 21, 5, 20, 22, 1, 6, 3
+ };
+ int hi = ARRAY_SIZE(presorted_event_table) - 1;
+ int lo = 0;
+
+ while (lo <= hi) {
+ int mid = lo + (hi - lo) / 2;
+ unsigned int alt = presorted_event_table[mid];
+
+ if (alt < event)
+ lo = mid + 1;
+ else if (alt > event)
+ hi = mid - 1;
+ else
+ return event_index_table[mid];
}
+
return -1;
}
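
The two new tables must stay in lockstep: presorted_event_table[] lists every
alternative event code in ascending order, and event_index_table[] maps each
sorted slot back to its row in event_alternatives[]. A hypothetical self-check
(not part of the diff, and assuming the tables hoisted to file scope) would
verify that mapping:

	static void __init check_presorted_tables(void)
	{
		int i, j;

		for (i = 0; i < ARRAY_SIZE(presorted_event_table); i++) {
			int row = event_index_table[i];
			bool found = false;

			for (j = 0; j < MAX_ALT; j++)
				if (event_alternatives[row][j] ==
				    presorted_event_table[i])
					found = true;
			/* every sorted entry must appear in its row */
			WARN_ON(!found);
		}
	}
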
diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/4xx/soc.c
index b2d940437a..5412e6b21e 100644
--- a/arch/powerpc/platforms/4xx/soc.c
+++ b/arch/powerpc/platforms/4xx/soc.c
@@ -112,7 +112,7 @@ static int __init ppc4xx_l2c_probe(void)
}
/* Install error handler */
- if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
+ if (request_irq(irq, l2c_error_handler, 0, "L2C", NULL) < 0) {
printk(KERN_ERR "Cannot install L2C error handler"
", cache is not enabled\n");
of_node_put(np);
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 2fb2a85d13..1135c1ab92 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -14,6 +14,8 @@
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/ipic.h>
+#include <asm/fixmap.h>
+
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index ebb5f6a27d..b24d4102fb 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -40,6 +40,7 @@
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/cpm.h>
+#include <asm/fixmap.h>
#include <sysdev/fsl_soc.h>
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 1a58761801..18daafbe2e 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -66,7 +66,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
*/
static struct spu_context *coredump_next_context(int *fd)
{
- struct spu_context *ctx;
+ struct spu_context *ctx = NULL;
struct file *file;
int n = iterate_fd(current->files, *fd, match_context, NULL);
if (!n)
@@ -74,10 +74,13 @@ static struct spu_context *coredump_next_context(int *fd)
*fd = n - 1;
rcu_read_lock();
- file = lookup_fd_rcu(*fd);
- ctx = SPUFS_I(file_inode(file))->i_ctx;
- get_spu_context(ctx);
+ file = lookup_fdget_rcu(*fd);
rcu_read_unlock();
+ if (file) {
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
+ get_spu_context(ctx);
+ fput(file);
+ }
return ctx;
}
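
The coredump fix swaps a borrowed pointer for a counted reference:
lookup_fd_rcu() returned a struct file that was never pinned and never
NULL-checked, while lookup_fdget_rcu() takes a reference (or returns NULL),
so the file may be used after rcu_read_unlock() and released with fput().
The pattern, in sketch form:

	rcu_read_lock();
	file = lookup_fdget_rcu(fd);	/* counted reference, or NULL */
	rcu_read_unlock();
	if (file) {
		/* safe to dereference outside the RCU section */
		ctx = SPUFS_I(file_inode(file))->i_ctx;
		get_spu_context(ctx);
		fput(file);		/* drop the reference taken above */
	}
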
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 38c5be34c8..10c1320adf 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -86,7 +86,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
out:
return inode;
}
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 130707ec9f..8bdae0caf2 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -2,6 +2,7 @@
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_BOOK3S && CPU_BIG_ENDIAN
+ select ADB_CUDA if POWER_RESET && PPC32
select MPIC
select FORCE_PCI
select PPC_INDIRECT_PCI if PPC32
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index ae62d432db..81c9fbae88 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2614,7 +2614,8 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
struct device_node* node;
int i;
volatile u32 __iomem *base;
- const u32 *addrp, *revp;
+ const __be32 *addrp;
+ const u32 *revp;
phys_addr_t addr;
u64 size;
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 40f3aa432f..c097d59167 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -925,8 +925,10 @@ static void __init smu_i2c_probe(void)
sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
bus = kzalloc(sz, GFP_KERNEL);
- if (bus == NULL)
+ if (bus == NULL) {
+ of_node_put(busnode);
return;
+ }
bus->controller = controller;
bus->busnode = of_node_get(busnode);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 8be71920e6..c83d1e1407 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -598,8 +598,10 @@ static void __init smp_core99_setup_i2c_hwsync(int ncpus)
name = "Pulsar";
break;
}
- if (pmac_tb_freeze != NULL)
+ if (pmac_tb_freeze != NULL) {
+ of_node_put(cc);
break;
+ }
}
if (pmac_tb_freeze != NULL) {
/* Open i2c bus for synchronous access */
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h
index 3f715efb0a..5eeb794b5e 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.h
+++ b/arch/powerpc/platforms/powernv/opal-fadump.h
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
- (u64)(reg_entry->reg_val));
+ (u64 __force)(reg_entry->reg_val));
opal_fadump_set_regval_regnum(regs,
be32_to_cpu(reg_entry->reg_type),
be32_to_cpu(reg_entry->reg_num),
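
This is the inverse of the __be64 annotations elsewhere in this series: when
cpu_endian is set the value is already in host byte order, and the __force
cast tells sparse that dropping the endian annotation is deliberate rather
than a missed conversion. In sketch form:

	__be64 raw = reg_entry->reg_val;

	u64 swapped = be64_to_cpu(raw);		/* checked conversion */
	u64 asis = (u64 __force)raw;		/* deliberate reinterpretation */
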
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 4ebf2ef284..afc0f6a613 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -164,6 +164,12 @@ config PSERIES_PLPKS
# This option is selected by in-kernel consumers that require
# access to the PKS.
+config PSERIES_PLPKS_SED
+ depends on PPC_PSERIES
+ bool
+ # This option is selected by in-kernel consumers that require
+ # access to the SED PKS keystore.
+
config PAPR_SCM
depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 53c3b91af2..1476c5e443 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
obj-$(CONFIG_PSERIES_PLPKS) += plpks.o
obj-$(CONFIG_PPC_SECURE_BOOT) += plpks-secvar.o
+obj-$(CONFIG_PSERIES_PLPKS_SED) += plpks_sed_ops.o
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 4adca5b61d..6f2eebae7b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -55,7 +55,8 @@ static bool find_aa_index(struct device_node *dr_node,
struct property *ala_prop,
const u32 *lmb_assoc, u32 *aa_index)
{
- u32 *assoc_arrays, new_prop_size;
+ __be32 *assoc_arrays;
+ u32 new_prop_size;
struct property *new_prop;
int aa_arrays, aa_array_entries, aa_array_sz;
int i, index;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index d4d6de0628..4e9916bb03 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -192,9 +192,9 @@ static void free_dtl_buffers(unsigned long *time_limit)
continue;
kmem_cache_free(dtl_cache, pp->dispatch_log);
pp->dtl_ridx = 0;
- pp->dispatch_log = 0;
- pp->dispatch_log_end = 0;
- pp->dtl_curr = 0;
+ pp->dispatch_log = NULL;
+ pp->dispatch_log_end = NULL;
+ pp->dtl_curr = NULL;
if (time_limit && time_after(jiffies, *time_limit)) {
cond_resched();
@@ -223,7 +223,7 @@ static void destroy_cpu_associativity(void)
{
kfree(vcpu_associativity);
kfree(pcpu_associativity);
- vcpu_associativity = pcpu_associativity = 0;
+ vcpu_associativity = pcpu_associativity = NULL;
}
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
@@ -662,8 +662,12 @@ u64 pseries_paravirt_steal_clock(int cpu)
{
struct lppaca *lppaca = &lppaca_of(cpu);
- return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
- be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
+ /*
+ * VPA steal time counters are reported at timebase (TB) frequency.
+ * Hence do a conversion to ns before returning.
+ */
+ return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
+ be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)));
}
#endif
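
The VPA accumulators tick at the timebase frequency, while the paravirt
steal-clock hook is defined to return nanoseconds, hence the tb_to_ns()
wrapper. A worked example, assuming the 512 MHz timebase common on these
systems:

	/* At 512 MHz one tick is ~1.953 ns, so half a second of steal
	 * time (256,000,000 ticks) must be reported as 500,000,000 ns.
	 */
	u64 ticks = 256000000ULL;
	u64 ns = tb_to_ns(ticks);	/* 500000000 on such systems */
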
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 0161226d8f..1798f0f14d 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -61,7 +61,6 @@ static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
},
- {}
};
static int __init register_nmi_wd_lpm_factor_sysctl(void)
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 4ba8245681..4448386268 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
pseries_msi_allocate_domains(phb);
+ ppc_iommu_register_device(phb);
+
/* Create EEH devices for the PHB */
eeh_phb_pe_create(phb);
@@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
}
}
+ ppc_iommu_unregister_device(phb);
+
pseries_msi_free_domains(phb);
/* Keep a reference so phb isn't freed yet */
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
index 2d40304eb6..febe18f251 100644
--- a/arch/powerpc/platforms/pseries/plpks.c
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -150,7 +150,7 @@ static int plpks_gen_password(void)
ospasswordlength = maxpwsize;
ospassword = kzalloc(maxpwsize, GFP_KERNEL);
if (!ospassword) {
- kfree(password);
+ kfree_sensitive(password);
return -ENOMEM;
}
memcpy(ospassword, password, ospasswordlength);
@@ -163,7 +163,7 @@ static int plpks_gen_password(void)
}
}
out:
- kfree(password);
+ kfree_sensitive(password);
return pseries_status_to_err(rc);
}
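
kfree_sensitive() zeroes the allocation (via memzero_explicit()) before
freeing it, so the hypervisor-generated password cannot linger in freed
memory the way it could with plain kfree(). In sketch form:

	u8 *password = kzalloc(maxpwsize, GFP_KERNEL);

	/* ... use the secret ... */

	kfree_sensitive(password);	/* wipe, then free; NULL-safe */
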
diff --git a/arch/powerpc/platforms/pseries/plpks_sed_ops.c b/arch/powerpc/platforms/pseries/plpks_sed_ops.c
new file mode 100644
index 0000000000..7c873c9589
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/plpks_sed_ops.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * POWER Platform specific code for non-volatile SED key access
+ * Copyright (C) 2022 IBM Corporation
+ *
+ * Define operations for SED Opal to read/write keys
+ * from the POWER LPAR Platform KeyStore (PLPKS).
+ *
+ * Self-Encrypting Drives (SED) key storage using PLPKS
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioctl.h>
+#include <linux/sed-opal-key.h>
+#include <asm/plpks.h>
+
+static bool plpks_sed_initialized = false;
+static bool plpks_sed_available = false;
+
+/*
+ * structure that contains all SED data
+ */
+struct plpks_sed_object_data {
+ u_char version;
+ u_char pad1[7];
+ u_long authority;
+ u_long range;
+ u_int key_len;
+ u_char key[32];
+};
+
+#define PLPKS_SED_OBJECT_DATA_V0 0
+#define PLPKS_SED_MANGLED_LABEL "/default/pri"
+#define PLPKS_SED_COMPONENT "sed-opal"
+#define PLPKS_SED_KEY "opal-boot-pin"
+
+/*
+ * authority is admin1 and range is global
+ */
+#define PLPKS_SED_AUTHORITY 0x0000000900010001
+#define PLPKS_SED_RANGE 0x0000080200000001
+
+static void plpks_init_var(struct plpks_var *var, char *keyname)
+{
+ if (!plpks_sed_initialized) {
+ plpks_sed_initialized = true;
+ plpks_sed_available = plpks_is_available();
+ if (!plpks_sed_available)
+ pr_err("SED: plpks not available\n");
+ }
+
+ var->name = keyname;
+ var->namelen = strlen(keyname);
+ if (strcmp(PLPKS_SED_KEY, keyname) == 0) {
+ var->name = PLPKS_SED_MANGLED_LABEL;
+ var->namelen = strlen(keyname);
+ }
+ var->policy = PLPKS_WORLDREADABLE;
+ var->os = PLPKS_VAR_COMMON;
+ var->data = NULL;
+ var->datalen = 0;
+ var->component = PLPKS_SED_COMPONENT;
+}
+
+/*
+ * Read the SED Opal key from PLPKS given the label
+ */
+int sed_read_key(char *keyname, char *key, u_int *keylen)
+{
+ struct plpks_var var;
+ struct plpks_sed_object_data data;
+ int ret;
+ u_int len;
+
+ plpks_init_var(&var, keyname);
+
+ if (!plpks_sed_available)
+ return -EOPNOTSUPP;
+
+ var.data = (u8 *)&data;
+ var.datalen = sizeof(data);
+
+ ret = plpks_read_os_var(&var);
+ if (ret != 0)
+ return ret;
+
+ len = min_t(u16, be32_to_cpu(data.key_len), var.datalen);
+ memcpy(key, data.key, len);
+ key[len] = '\0';
+ *keylen = len;
+
+ return 0;
+}
+
+/*
+ * Write the SED Opal key to PLPKS given the label
+ */
+int sed_write_key(char *keyname, char *key, u_int keylen)
+{
+ struct plpks_var var;
+ struct plpks_sed_object_data data;
+ struct plpks_var_name vname;
+
+ plpks_init_var(&var, keyname);
+
+ if (!plpks_sed_available)
+ return -EOPNOTSUPP;
+
+ var.datalen = sizeof(struct plpks_sed_object_data);
+ var.data = (u8 *)&data;
+
+ /* initialize SED object */
+ data.version = PLPKS_SED_OBJECT_DATA_V0;
+ data.authority = cpu_to_be64(PLPKS_SED_AUTHORITY);
+ data.range = cpu_to_be64(PLPKS_SED_RANGE);
+ memset(&data.pad1, '\0', sizeof(data.pad1));
+ data.key_len = cpu_to_be32(keylen);
+ memcpy(data.key, (char *)key, keylen);
+
+ /*
+ * Updating a key requires removing the old object first. The return
+ * value is ignored since it is fine for the key not to exist yet.
+ */
+ vname.namelen = var.namelen;
+ vname.name = var.name;
+ plpks_remove_var(var.component, var.os, vname);
+
+ return plpks_write_var(var);
+}
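
A consumer such as the SED Opal core would pair the two helpers along these
lines (a sketch only; PLPKS_SED_KEY is the label defined above, and the
buffer covers the 32-byte key field plus the NUL that sed_read_key()
appends):

	char key[33];
	u_int keylen;

	if (sed_read_key(PLPKS_SED_KEY, key, &keylen) == 0)
		pr_info("SED boot pin present (%u bytes)\n", keylen);

	/* rewriting replaces any existing object (remove-then-write above) */
	if (sed_write_key(PLPKS_SED_KEY, key, keylen))
		pr_err("SED boot pin update failed\n");
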
diff --git a/arch/powerpc/platforms/pseries/rtas-work-area.c b/arch/powerpc/platforms/pseries/rtas-work-area.c
index b37d52f403..7fe34bee84 100644
--- a/arch/powerpc/platforms/pseries/rtas-work-area.c
+++ b/arch/powerpc/platforms/pseries/rtas-work-area.c
@@ -184,6 +184,7 @@ machine_arch_initcall(pseries, rtas_work_area_allocator_init);
/**
* rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas.
+ * @limit: Upper limit for memblock allocation.
*/
void __init rtas_work_area_reserve_arena(const phys_addr_t limit)
{
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 57978a44d5..558ec68d76 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -11,9 +11,11 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/seq_file.h>
@@ -392,7 +394,6 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
- const struct of_device_id *match;
struct fsl_msi *msi;
struct resource res, msiir;
int err, i, j, irq_index, count;
@@ -402,10 +403,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
u32 offset;
struct pci_controller *phb;
- match = of_match_device(fsl_of_msi_ids, &dev->dev);
- if (!match)
- return -EINVAL;
- features = match->data;
+ features = device_get_match_data(&dev->dev);
printk(KERN_DEBUG "Setting up Freescale MSI support\n");
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ba287abcb0..dabbdd3566 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -355,7 +355,7 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
- if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
+ if (r == swab32(MPIC_VECPRI_MASK)) {
printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
mpic->flags |= MPIC_BROKEN_IPI;
}
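
The old check only worked on big-endian kernels: le32_to_cpu() on a constant
byte-swaps on BE but is a no-op on LE, whereas detecting reversed IPI
registers needs the byte-swapped value on every host, which swab32() gives
unconditionally. Illustrated with MPIC_VECPRI_MASK's value:

	u32 mask = 0x80000000;			/* MPIC_VECPRI_MASK */

	swab32(mask);				/* 0x00000080 everywhere */
	le32_to_cpu((__force __le32)mask);	/* 0x00000080 on BE, 0x80000000 on LE */
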
diff --git a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
index a31a56016c..73e331e766 100755
--- a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
+++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
@@ -7,21 +7,20 @@ set -o pipefail
# To debug, uncomment the following line
# set -x
-# -mprofile-kernel is only supported on 64-bit, so this should not be invoked
-# for 32-bit. We pass in -m64 explicitly, and -mbig-endian and -mlittle-endian
-# are passed in from Kconfig, which takes care of toolchains defaulting to
-# other targets.
+# -mprofile-kernel is only supported on 64-bit with ELFv2, so this should not
+# be invoked for other targets. Therefore we can pass in -m64 and -mabi
+# explicitly, to take care of toolchains defaulting to other targets.
# Test whether the compile option -mprofile-kernel exists and generates
# profiling code (ie. a call to _mcount()).
echo "int func() { return 0; }" | \
- $* -m64 -S -x c -O2 -p -mprofile-kernel - -o - \
+ $* -m64 -mabi=elfv2 -S -x c -O2 -p -mprofile-kernel - -o - \
2> /dev/null | grep -q "_mcount"
# Test whether the notrace attribute correctly suppresses calls to _mcount().
echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \
- $* -m64 -S -x c -O2 -p -mprofile-kernel - -o - \
+ $* -m64 -mabi=elfv2 -S -x c -O2 -p -mprofile-kernel - -o - \
2> /dev/null | grep -q "_mcount" && \
exit 1