Diffstat (limited to 'arch/um/include/asm')
44 files changed, 1866 insertions, 0 deletions
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild new file mode 100644 index 0000000000..b2d834a29f --- /dev/null +++ b/arch/um/include/asm/Kbuild @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += bpf_perf_event.h +generic-y += bug.h +generic-y += compat.h +generic-y += current.h +generic-y += device.h +generic-y += dma-mapping.h +generic-y += emergency-restart.h +generic-y += exec.h +generic-y += extable.h +generic-y += fb.h +generic-y += ftrace.h +generic-y += hw_irq.h +generic-y += irq_regs.h +generic-y += irq_work.h +generic-y += kdebug.h +generic-y += mcs_spinlock.h +generic-y += mmiowb.h +generic-y += module.lds.h +generic-y += param.h +generic-y += parport.h +generic-y += percpu.h +generic-y += preempt.h +generic-y += softirq_stack.h +generic-y += switch_to.h +generic-y += topology.h +generic-y += trace_clock.h +generic-y += kprobes.h +generic-y += mm_hooks.h +generic-y += vga.h diff --git a/arch/um/include/asm/archrandom.h b/arch/um/include/asm/archrandom.h new file mode 100644 index 0000000000..24e16c979c --- /dev/null +++ b/arch/um/include/asm/archrandom.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_UM_ARCHRANDOM_H__ +#define __ASM_UM_ARCHRANDOM_H__ + +#include <linux/types.h> + +/* This is from <os.h>, but better not to #include that in a global header here. */ +ssize_t os_getrandom(void *buf, size_t len, unsigned int flags); + +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) +{ + ssize_t ret; + + ret = os_getrandom(v, max_longs * sizeof(*v), 0); + if (ret < 0) + return 0; + return ret / sizeof(*v); +} + +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +#endif diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h new file mode 100644 index 0000000000..5898a26daa --- /dev/null +++ b/arch/um/include/asm/asm-prototypes.h @@ -0,0 +1 @@ +#include <asm-generic/asm-prototypes.h> diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h new file mode 100644 index 0000000000..5c15627348 --- /dev/null +++ b/arch/um/include/asm/cache.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_CACHE_H +#define __UM_CACHE_H + + +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) +#elif defined(CONFIG_UML_X86) /* 64-bit */ +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */ +#else +/* XXX: this was taken from x86, now it's completely random. Luckily only + * affects SMP padding. */ +# define L1_CACHE_SHIFT 5 +#endif + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif diff --git a/arch/um/include/asm/cacheflush.h b/arch/um/include/asm/cacheflush.h new file mode 100644 index 0000000000..4c9858cd36 --- /dev/null +++ b/arch/um/include/asm/cacheflush.h @@ -0,0 +1,9 @@ +#ifndef __UM_ASM_CACHEFLUSH_H +#define __UM_ASM_CACHEFLUSH_H + +#include <asm/tlbflush.h> +#define flush_cache_vmap flush_tlb_kernel_range +#define flush_cache_vunmap flush_tlb_kernel_range + +#include <asm-generic/cacheflush.h> +#endif /* __UM_ASM_CACHEFLUSH_H */ diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S new file mode 100644 index 0000000000..fd481ac371 --- /dev/null +++ b/arch/um/include/asm/common.lds.S @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <asm-generic/vmlinux.lds.h> + + .fini : { *(.fini) } =0x9090 + _etext = .; + PROVIDE (etext = .); + + . 
= ALIGN(4096); + _sdata = .; + PROVIDE (sdata = .); + + RO_DATA(4096) + + .unprotected : { *(.unprotected) } + . = ALIGN(4096); + PROVIDE (_unprotected_end = .); + + . = ALIGN(4096); + EXCEPTION_TABLE(0) + + BUG_TABLE + + .uml.setup.init : { + __uml_setup_start = .; + *(.uml.setup.init) + __uml_setup_end = .; + } + + .uml.help.init : { + __uml_help_start = .; + *(.uml.help.init) + __uml_help_end = .; + } + + .uml.postsetup.init : { + __uml_postsetup_start = .; + *(.uml.postsetup.init) + __uml_postsetup_end = .; + } + + .init.setup : { + INIT_SETUP(0) + } + + PERCPU_SECTION(32) + + .initcall.init : { + INIT_CALLS + } + + .con_initcall.init : { + CON_INITCALL + } + + .exitcall : { + __exitcall_begin = .; + *(.exitcall.exit) + __exitcall_end = .; + } + + .uml.exitcall : { + __uml_exitcall_begin = .; + *(.uml.exitcall.exit) + __uml_exitcall_end = .; + } + + . = ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : { *(.altinstr_replacement) } + /* .exit.text is discarded at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ + .exit.text : { EXIT_TEXT } + .exit.data : { *(.exit.data) } + + .preinit_array : { + __preinit_array_start = .; + *(.preinit_array) + __preinit_array_end = .; + } + .init_array : { + __init_array_start = .; + *(.kasan_init) + *(.init_array.*) + *(.init_array) + __init_array_end = .; + } + .fini_array : { + __fini_array_start = .; + *(.fini_array) + __fini_array_end = .; + } + + . = ALIGN(4096); + .init.ramfs : { + INIT_RAM_FS + } + diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h new file mode 100644 index 0000000000..4b6d1b526b --- /dev/null +++ b/arch/um/include/asm/cpufeature.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_UM_CPUFEATURE_H +#define _ASM_UM_CPUFEATURE_H + +#include <asm/processor.h> + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) + +#include <asm/asm.h> +#include <linux/bitops.h> + +extern const char * const x86_cap_flags[NCAPINTS*32]; +extern const char * const x86_power_flags[32]; +#define X86_CAP_FMT "%s" +#define x86_cap_flag(flag) x86_cap_flags[flag] + +/* + * In order to save room, we index into this array by doing + * X86_BUG_<name> - NCAPINTS*32. + */ +extern const char * const x86_bug_flags[NBUGINTS*32]; + +#define test_cpu_cap(c, bit) \ + test_bit(bit, (unsigned long *)((c)->x86_capability)) + +/* + * There are 32 bits/features in each mask word. The high bits + * (selected with (bit>>5)) give us the word number and the low 5 + * bits give us the bit/feature number inside the word. + * (1UL<<((bit)&31)) gives us a mask for the feature_bit so we can + * see if it is set in the mask word. + */ +#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \ + (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word )) + +#define cpu_has(c, bit) \ + test_cpu_cap(c, bit) + +#define this_cpu_has(bit) \ + (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ + x86_this_cpu_test_bit(bit, \ + (unsigned long __percpu *)&cpu_info.x86_capability)) + +/* + * This macro is for detection of features which need kernel + * infrastructure to be used. It may *not* directly test the CPU + * itself. Use the cpu_has() family if you want true runtime + * testing of CPU features, like in hypervisor code where you are + * supporting a possible guest feature where host support for it + * is not relevant.
+ */ +#define cpu_feature_enabled(bit) \ + (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit)) + +#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) + +#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) + +extern void setup_clear_cpu_cap(unsigned int bit); + +#define setup_force_cpu_cap(bit) do { \ + set_cpu_cap(&boot_cpu_data, bit); \ + set_bit(bit, (unsigned long *)cpu_caps_set); \ +} while (0) + +#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) + +/* + * Static testing of CPU features. Used the same as boot_cpu_has(). It + * statically patches the target code for additional performance. Use + * static_cpu_has() only in fast paths, where every cycle counts. Which + * means that the boot_cpu_has() variant is already fast enough for the + * majority of cases and you should stick to using it as it is generally + * only two instructions: a RIP-relative MOV and a TEST. + */ +static __always_inline bool _static_cpu_has(u16 bit) +{ + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P[always]\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P[feature]\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" + : : [feature] "i" (bit), + [always] "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), + [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) + : : t_yes, t_no); +t_yes: + return true; +t_no: + return false; +} + +#define static_cpu_has(bit) \ +( \ + __builtin_constant_p(boot_cpu_has(bit)) ? 
\ + boot_cpu_has(bit) : \ + _static_cpu_has(bit) \ +) + +#define cpu_has_bug(c, bit) cpu_has(c, (bit)) +#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) + +#define static_cpu_has_bug(bit) static_cpu_has((bit)) +#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) +#define boot_cpu_set_bug(bit) set_cpu_cap(&boot_cpu_data, (bit)) + +#define MAX_CPU_FEATURES (NCAPINTS * 32) +#define cpu_have_feature boot_cpu_has + +#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" +#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ + boot_cpu_data.x86_model + +#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ +#endif /* _ASM_UM_CPUFEATURE_H */ diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h new file mode 100644 index 0000000000..e79b2ab6f4 --- /dev/null +++ b/arch/um/include/asm/delay.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_DELAY_H +#define __UM_DELAY_H +#include <asm-generic/delay.h> +#include <linux/time-internal.h> + +static inline void um_ndelay(unsigned long nsecs) +{ + if (time_travel_mode == TT_MODE_INFCPU || + time_travel_mode == TT_MODE_EXTERNAL) { + time_travel_ndelay(nsecs); + return; + } + ndelay(nsecs); +} +#undef ndelay +#define ndelay(n) um_ndelay(n) + +static inline void um_udelay(unsigned long usecs) +{ + if (time_travel_mode == TT_MODE_INFCPU || + time_travel_mode == TT_MODE_EXTERNAL) { + time_travel_ndelay(1000 * usecs); + return; + } + udelay(usecs); +} +#undef udelay +#define udelay(n) um_udelay(n) +#endif /* __UM_DELAY_H */ diff --git a/arch/um/include/asm/dma.h b/arch/um/include/asm/dma.h new file mode 100644 index 0000000000..fdc53642c7 --- /dev/null +++ b/arch/um/include/asm/dma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_DMA_H +#define __UM_DMA_H + +#include <asm/io.h> + +extern unsigned long uml_physmem; + +#define MAX_DMA_ADDRESS (uml_physmem) + +#endif diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h new file mode 100644 index 0000000000..2efac58271 --- /dev/null +++ b/arch/um/include/asm/fixmap.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_FIXMAP_H +#define __UM_FIXMAP_H + +#include <asm/processor.h> +#include <asm/archparam.h> +#include <asm/page.h> +#include <linux/threads.h> + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of virtual memory (0xfffff000) backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * higher than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. + * + * TLB entries of such buffers will not be flushed across + * task switches. + */ + +/* + * on UP currently we will have no trace of the fixmap mechanism, + * no page table allocations, etc. This might change in the + * future, say framebuffers for the console driver(s) could be + * fix-mapped? + */ +enum fixed_addresses { + __end_of_fixed_addresses +}; + +extern void __set_fixmap (enum fixed_addresses idx, + unsigned long phys, pgprot_t flags); + +/* + * used by vmalloc.c.
+ * + * Leave one empty page between vmalloc'ed areas and + * the start of the fixmap, and leave one page empty + * at the top of mem.. + */ + +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#include <asm-generic/fixmap.h> + +#endif diff --git a/arch/um/include/asm/fpu/api.h b/arch/um/include/asm/fpu/api.h new file mode 100644 index 0000000000..71bfd9ef39 --- /dev/null +++ b/arch/um/include/asm/fpu/api.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_UM_FPU_API_H +#define _ASM_UM_FPU_API_H + +/* Copyright (c) 2020 Cambridge Greys Ltd + * Copyright (c) 2020 Red Hat Inc. + * A set of "dummy" defines to allow the direct inclusion + * of x86 optimized copy, xor, etc routines into the + * UML code tree. */ + +#define kernel_fpu_begin() (void)0 +#define kernel_fpu_end() (void)0 + +static inline bool irq_fpu_usable(void) +{ + return true; +} + + +#endif diff --git a/arch/um/include/asm/futex.h b/arch/um/include/asm/futex.h new file mode 100644 index 0000000000..780aa6bfc0 --- /dev/null +++ b/arch/um/include/asm/futex.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_UM_FUTEX_H +#define _ASM_UM_FUTEX_H + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + + +int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr); +int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval); + +#endif diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h new file mode 100644 index 0000000000..52e2c36267 --- /dev/null +++ b/arch/um/include/asm/hardirq.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_UM_HARDIRQ_H +#define __ASM_UM_HARDIRQ_H + +#include <asm-generic/hardirq.h> + +#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 + +#endif /* __ASM_UM_HARDIRQ_H */ diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h new file mode 100644 index 0000000000..9ea42cc746 --- /dev/null +++ b/arch/um/include/asm/io.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_UM_IO_H +#define _ASM_UM_IO_H +#include <linux/types.h> + +/* get emulated iomem (if desired) */ +#include <asm-generic/logic_io.h> + +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif /* ioremap */ + +#ifndef iounmap +#define iounmap iounmap +static inline void iounmap(void __iomem *addr) +{ +} +#endif /* iounmap */ + +#include <asm-generic/io.h> + +#endif diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h new file mode 100644 index 0000000000..749dfe8512 --- /dev/null +++ b/arch/um/include/asm/irq.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_IRQ_H +#define __UM_IRQ_H + +#define TIMER_IRQ 0 +#define UMN_IRQ 1 +#define UBD_IRQ 2 +#define UM_ETH_IRQ 3 +#define ACCEPT_IRQ 4 +#define MCONSOLE_IRQ 5 +#define WINCH_IRQ 6 +#define SIGIO_WRITE_IRQ 7 +#define TELNETD_IRQ 8 +#define XTERM_IRQ 9 +#define RANDOM_IRQ 10 + +#ifdef CONFIG_UML_NET_VECTOR + +#define VECTOR_BASE_IRQ (RANDOM_IRQ + 1) +#define VECTOR_IRQ_SPACE 8 + +#define UM_FIRST_DYN_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ) + +#else + +#define UM_FIRST_DYN_IRQ (RANDOM_IRQ + 1) + +#endif + +#define UM_LAST_SIGNAL_IRQ 64 +/* If we have (simulated) PCI MSI, allow 64 more interrupt numbers for it */ +#ifdef CONFIG_PCI_MSI +#define NR_IRQS (UM_LAST_SIGNAL_IRQ + 64) 
+#else +#define NR_IRQS UM_LAST_SIGNAL_IRQ +#endif /* CONFIG_PCI_MSI */ + +#include <asm-generic/irq.h> +#endif diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h new file mode 100644 index 0000000000..1e69ef5bc3 --- /dev/null +++ b/arch/um/include/asm/irqflags.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_IRQFLAGS_H +#define __UM_IRQFLAGS_H + +extern int signals_enabled; +int um_set_signals(int enable); +void block_signals(void); +void unblock_signals(void); + +#define arch_local_save_flags arch_local_save_flags +static inline unsigned long arch_local_save_flags(void) +{ + return signals_enabled; +} + +#define arch_local_irq_restore arch_local_irq_restore +static inline void arch_local_irq_restore(unsigned long flags) +{ + um_set_signals(flags); +} + +#define arch_local_irq_enable arch_local_irq_enable +static inline void arch_local_irq_enable(void) +{ + unblock_signals(); +} + +#define arch_local_irq_disable arch_local_irq_disable +static inline void arch_local_irq_disable(void) +{ + block_signals(); +} + +#define ARCH_IRQ_DISABLED 0 + +#include <asm-generic/irqflags.h> + +#endif diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h new file mode 100644 index 0000000000..0d6547f4ec --- /dev/null +++ b/arch/um/include/asm/kasan.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_UM_KASAN_H +#define __ASM_UM_KASAN_H + +#include <linux/init.h> +#include <linux/const.h> + +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + +/* used in kasan_mem_to_shadow to divide by 8 */ +#define KASAN_SHADOW_SCALE_SHIFT 3 + +#ifdef CONFIG_X86_64 +#define KASAN_HOST_USER_SPACE_END_ADDR 0x00007fffffffffffUL +/* KASAN_SHADOW_SIZE is the size of total address space divided by 8 */ +#define KASAN_SHADOW_SIZE ((KASAN_HOST_USER_SPACE_END_ADDR + 1) >> \ + KASAN_SHADOW_SCALE_SHIFT) +#else +#error "KASAN_SHADOW_SIZE is not defined for this sub-architecture" +#endif /* CONFIG_X86_64 */ + +#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET) +#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) + +#ifdef CONFIG_KASAN +void kasan_init(void); +void kasan_map_memory(void *start, unsigned long len); +extern int kasan_um_is_ready; + +#ifdef CONFIG_STATIC_LINK +#define kasan_arch_is_ready() (kasan_um_is_ready) +#endif +#else +static inline void kasan_init(void) { } +#endif /* CONFIG_KASAN */ + +#endif /* __ASM_UM_KASAN_H */ diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h new file mode 100644 index 0000000000..14fab8f0b9 --- /dev/null +++ b/arch/um/include/asm/kvm_para.h @@ -0,0 +1 @@ +#include <asm-generic/kvm_para.h> diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h new file mode 100644 index 0000000000..5b072aba5b --- /dev/null +++ b/arch/um/include/asm/mmu.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __ARCH_UM_MMU_H +#define __ARCH_UM_MMU_H + +#include <mm_id.h> +#include <asm/mm_context.h> + +typedef struct mm_context { + struct mm_id id; + struct uml_arch_mm_context arch; + struct page *stub_pages[2]; +} mm_context_t; + +extern void __switch_mm(struct mm_id * mm_idp); + +/* Avoid tangled inclusion with asm/ldt.h */ +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm); +extern void free_ldt(struct mm_context *mm); + +#endif diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h new file mode 100644 
index 0000000000..68e2eb9cfb --- /dev/null +++ b/arch/um/include/asm/mmu_context.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __UM_MMU_CONTEXT_H +#define __UM_MMU_CONTEXT_H + +#include <linux/sched.h> +#include <linux/mm_types.h> +#include <linux/mmap_lock.h> + +#include <asm/mm_hooks.h> +#include <asm/mmu.h> + +extern void force_flush_all(void); + +#define activate_mm activate_mm +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) +{ + /* + * This is called by fs/exec.c and sys_unshare() + * when the new ->mm is used for the first time. + */ + __switch_mm(&new->context.id); +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned cpu = smp_processor_id(); + + if(prev != next){ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + cpumask_set_cpu(cpu, mm_cpumask(next)); + if(next != &init_mm) + __switch_mm(&next->context.id); + } +} + +#define init_new_context init_new_context +extern int init_new_context(struct task_struct *task, struct mm_struct *mm); + +#define destroy_context destroy_context +extern void destroy_context(struct mm_struct *mm); + +#include <asm-generic/mmu_context.h> + +#endif diff --git a/arch/um/include/asm/msi.h b/arch/um/include/asm/msi.h new file mode 100644 index 0000000000..c8c6c381f3 --- /dev/null +++ b/arch/um/include/asm/msi.h @@ -0,0 +1 @@ +#include <asm-generic/msi.h> diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h new file mode 100644 index 0000000000..84866127d0 --- /dev/null +++ b/arch/um/include/asm/page.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) + * Copyright 2003 PathScale, Inc. + */ + +#ifndef __UM_PAGE_H +#define __UM_PAGE_H + +#include <linux/const.h> + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 12 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifndef __ASSEMBLY__ + +struct page; + +#include <linux/pfn.h> +#include <linux/types.h> +#include <asm/vm-flags.h> + +/* + * These are used to make use of C type-checking.. 
+ */ + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT) + +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +#define pte_val(p) ((p).pte) + +#define pte_get_bits(p, bits) ((p).pte & (bits)) +#define pte_set_bits(p, bits) ((p).pte |= (bits)) +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits)) +#define pte_copy(to, from) ({ (to).pte = (from).pte; }) +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE)) +#define pte_set_val(p, phys, prot) \ + ({ (p).pte = (phys) | pgprot_val(prot); }) + +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) + +typedef unsigned long long phys_t; + +#else + +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pgd; } pgd_t; + +#ifdef CONFIG_3_LEVEL_PGTABLES +typedef struct { unsigned long pmd; } pmd_t; +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) } ) +#endif + +#define pte_val(x) ((x).pte) + + +#define pte_get_bits(p, bits) ((p).pte & (bits)) +#define pte_set_bits(p, bits) ((p).pte |= (bits)) +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits)) +#define pte_copy(to, from) ((to).pte = (from).pte) +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE)) +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot)) + +typedef unsigned long phys_t; + +#endif + +typedef struct { unsigned long pgprot; } pgprot_t; + +typedef struct page *pgtable_t; + +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +extern unsigned long uml_physmem; + +#define PAGE_OFFSET (uml_physmem) +#define KERNELBASE PAGE_OFFSET + +#define __va_space (8*1024*1024) + +#include <mem.h> + +/* Cast to unsigned long before casting to void * to avoid a warning from + * mmap_kmem about cutting a long long down to a void *. Not sure that + * casting is the right thing, but 32-bit UML can't have 64-bit virtual + * addresses + */ +#define __pa(virt) uml_to_phys((void *) (unsigned long) (virt)) +#define __va(phys) uml_to_virt((unsigned long) (phys)) + +#define phys_to_pfn(p) ((p) >> PAGE_SHIFT) +#define pfn_to_phys(pfn) PFN_PHYS(pfn) + +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_X86_32 +#define __HAVE_ARCH_GATE_AREA 1 +#endif + +#endif /* __UM_PAGE_H */ diff --git a/arch/um/include/asm/pci.h b/arch/um/include/asm/pci.h new file mode 100644 index 0000000000..238d2e7faf --- /dev/null +++ b/arch/um/include/asm/pci.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_UM_PCI_H +#define __ASM_UM_PCI_H +#include <linux/types.h> +#include <asm/io.h> + +/* Generic PCI */ +#include <asm-generic/pci.h> + +#ifdef CONFIG_PCI_MSI +/* + * This is a bit of an annoying hack, and it assumes we only have + * the virt-pci (if anything). Which is true, but still. 
+ */ +void *pci_root_bus_fwnode(struct pci_bus *bus); +#define pci_root_bus_fwnode pci_root_bus_fwnode +#endif + +#endif /* __ASM_UM_PCI_H */ diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h new file mode 100644 index 0000000000..de5e31c647 --- /dev/null +++ b/arch/um/include/asm/pgalloc.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright 2003 PathScale, Inc. + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h + */ + +#ifndef __UM_PGALLOC_H +#define __UM_PGALLOC_H + +#include <linux/mm.h> + +#include <asm-generic/pgalloc.h> + +#define pmd_populate_kernel(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) + +#define pmd_populate(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_PAGE_TABLE + \ + ((unsigned long long)page_to_pfn(pte) << \ + (unsigned long long) PAGE_SHIFT))) + +/* + * Allocate and free page tables. + */ +extern pgd_t *pgd_alloc(struct mm_struct *); + +#define __pte_free_tlb(tlb, pte, address) \ +do { \ + pagetable_pte_dtor(page_ptdesc(pte)); \ + tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ +} while (0) + +#ifdef CONFIG_3_LEVEL_PGTABLES + +#define __pmd_free_tlb(tlb, pmd, address) \ +do { \ + pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \ + tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \ +} while (0) + +#endif + +#endif + diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h new file mode 100644 index 0000000000..8256ecc5b9 --- /dev/null +++ b/arch/um/include/asm/pgtable-2level.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright 2003 PathScale, Inc. + * Derived from include/asm-i386/pgtable.h + */ + +#ifndef __UM_PGTABLE_2LEVEL_H +#define __UM_PGTABLE_2LEVEL_H + +#include <asm-generic/pgtable-nopmd.h> + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ + +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * entries per page directory level: the i386 is two-level, so + * we don't really have any PMD directory physically. 
+ */ +#define PTRS_PER_PTE 1024 +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) +#define PTRS_PER_PGD 1024 + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \ + pte_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \ + pgd_val(e)) + +static inline int pgd_newpage(pgd_t pgd) { return 0; } +static inline void pgd_mkuptodate(pgd_t pgd) { } + +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) + +#define pte_pfn(x) phys_to_pfn(pte_val(x)) +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot)) + +#endif diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h new file mode 100644 index 0000000000..8a5032ec23 --- /dev/null +++ b/arch/um/include/asm/pgtable-3level.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2003 PathScale Inc + * Derived from include/asm-i386/pgtable.h + */ + +#ifndef __UM_PGTABLE_3LEVEL_H +#define __UM_PGTABLE_3LEVEL_H + +#include <asm-generic/pgtable-nopud.h> + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ + +#ifdef CONFIG_64BIT +#define PGDIR_SHIFT 30 +#else +#define PGDIR_SHIFT 31 +#endif +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* PMD_SHIFT determines the size of the area a second-level page table can + * map + */ + +#define PMD_SHIFT 21 +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * entries per page directory level + */ + +#define PTRS_PER_PTE 512 +#ifdef CONFIG_64BIT +#define PTRS_PER_PMD 512 +#define PTRS_PER_PGD 512 +#else +#define PTRS_PER_PMD 1024 +#define PTRS_PER_PGD 1024 +#endif + +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE) + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \ + pgd_val(e)) + +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE)) +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT) +#define pud_populate(mm, pud, pmd) \ + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) + +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval)) + +static inline int pgd_newpage(pgd_t pgd) +{ + return(pgd_val(pgd) & _PAGE_NEWPAGE); +} + +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; } + +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) + +static inline void pud_clear (pud_t *pud) +{ + set_pud(pud, __pud(_PAGE_NEWPAGE)); +} + +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK) +#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK)) + +static inline unsigned long pte_pfn(pte_t pte) +{ + return phys_to_pfn(pte_val(pte)); +} + +static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) +{ + pte_t pte; + phys_t phys = pfn_to_phys(page_nr); + + pte_set_val(pte, phys, pgprot); + return pte; +} + +static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) +{ + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)); +} + +#endif + diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h new file mode 100644 index 0000000000..e1ece21dbe --- /dev/null +++ 
b/arch/um/include/asm/pgtable.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + * Copyright 2003 PathScale, Inc. + * Derived from include/asm-i386/pgtable.h + */ + +#ifndef __UM_PGTABLE_H +#define __UM_PGTABLE_H + +#include <asm/fixmap.h> + +#define _PAGE_PRESENT 0x001 +#define _PAGE_NEWPAGE 0x002 +#define _PAGE_NEWPROT 0x004 +#define _PAGE_RW 0x020 +#define _PAGE_USER 0x040 +#define _PAGE_ACCESSED 0x080 +#define _PAGE_DIRTY 0x100 +/* If _PAGE_PRESENT is clear, we use these: */ +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE; + pte_present gives true */ + +/* We borrow bit 10 to store the exclusive marker in swap PTEs. */ +#define _PAGE_SWP_EXCLUSIVE 0x400 + +#ifdef CONFIG_3_LEVEL_PGTABLES +#include <asm/pgtable-3level.h> +#else +#include <asm/pgtable-2level.h> +#endif + +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +/* zero page used for uninitialized stuff */ +extern unsigned long *empty_zero_page; + +/* Just any arbitrary offset to the start of the vmalloc VM area: the + * current 8MB value just means that there will be an 8MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leave a hole of 4kB between each vmalloced + * area for the same reason. ;) + */ + +extern unsigned long end_iomem; + +#define VMALLOC_OFFSET (__va_space) +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK) +#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +#define MODULES_VADDR VMALLOC_START +#define MODULES_END VMALLOC_END +#define MODULES_LEN (MODULES_VADDR - MODULES_END) + +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) + +/* + * The i386 can't do page protection for execute, and considers execute + * permission the same as read permission. + * Also, write permissions imply read permissions. This is the closest we can + * get. + */ + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.
+ */ +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) + +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) + +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE)) +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) + +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0) + +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE) +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE) + +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE) +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE) + +#define p4d_newpage(x) (p4d_val(x) & _PAGE_NEWPAGE) +#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE) + +#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK) + +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE)) + +/* + * ================================= + * Flags checking section. + * ================================= + */ + +static inline int pte_none(pte_t pte) +{ + return pte_is_zero(pte); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_read(pte_t pte) +{ + return((pte_get_bits(pte, _PAGE_USER)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +static inline int pte_exec(pte_t pte){ + return((pte_get_bits(pte, _PAGE_USER)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +static inline int pte_write(pte_t pte) +{ + return((pte_get_bits(pte, _PAGE_RW)) && + !(pte_get_bits(pte, _PAGE_PROTNONE))); +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_ACCESSED); +} + +static inline int pte_newpage(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_NEWPAGE); +} + +static inline int pte_newprot(pte_t pte) +{ + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT))); +} + +/* + * ================================= + * Flags setting section. 
+ * ================================= + */ + +static inline pte_t pte_mknewprot(pte_t pte) +{ + pte_set_bits(pte, _PAGE_NEWPROT); + return(pte); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_DIRTY); + return(pte); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_ACCESSED); + return(pte); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + if (likely(pte_get_bits(pte, _PAGE_RW))) + pte_clear_bits(pte, _PAGE_RW); + else + return pte; + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkread(pte_t pte) +{ + if (unlikely(pte_get_bits(pte, _PAGE_USER))) + return pte; + pte_set_bits(pte, _PAGE_USER); + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_set_bits(pte, _PAGE_DIRTY); + return(pte); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_set_bits(pte, _PAGE_ACCESSED); + return(pte); +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + if (unlikely(pte_get_bits(pte, _PAGE_RW))) + return pte; + pte_set_bits(pte, _PAGE_RW); + return(pte_mknewprot(pte)); +} + +static inline pte_t pte_mkuptodate(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_NEWPAGE); + if(pte_present(pte)) + pte_clear_bits(pte, _PAGE_NEWPROT); + return(pte); +} + +static inline pte_t pte_mknewpage(pte_t pte) +{ + pte_set_bits(pte, _PAGE_NEWPAGE); + return(pte); +} + +static inline void set_pte(pte_t *pteptr, pte_t pteval) +{ + pte_copy(*pteptr, pteval); + + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to + * mapped pages. + */ + + *pteptr = pte_mknewpage(*pteptr); + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr); +} + +#define PFN_PTE_SHIFT PAGE_SHIFT + +#define __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE); +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ + +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) +#define __virt_to_page(virt) phys_to_page(__pa(virt)) +#define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr) + +#define mk_pte(page, pgprot) \ + ({ pte_t pte; \ + \ + pte_set_val(pte, page_to_phys(page), (pgprot)); \ + if (pte_present(pte)) \ + pte_mknewprot(pte_mknewpage(pte)); \ + pte;}) + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot); + return pte; +} + +/* + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] + * + * this macro returns the index of the entry in the pmd page which would + * control the given virtual address + */ +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) + +struct mm_struct; +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); + +#define update_mmu_cache(vma,address,ptep) do {} while (0) +#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0) + +/* + * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that + * are !pte_none() && !pte_present(). + * + * Format of swap PTEs: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <--------------- offset ----------------> E < type -> 0 0 0 1 0 + * + * E is the exclusive marker that is not stored in swap entries. 
+ * _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte(). + */ +#define __swp_type(x) (((x).val >> 5) & 0x1f) +#define __swp_offset(x) ((x).val >> 11) + +#define __swp_entry(type, offset) \ + ((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) }) +#define __pte_to_swp_entry(pte) \ + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE); + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE); + return pte; +} + +/* Clear a kernel PTE and flush it from the TLB */ +#define kpte_clear_flush(ptep, vaddr) \ +do { \ + pte_clear(&init_mm, (vaddr), (ptep)); \ + __flush_tlb_one((vaddr)); \ +} while (0) + +#endif diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h new file mode 100644 index 0000000000..7414154b8e --- /dev/null +++ b/arch/um/include/asm/processor-generic.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __UM_PROCESSOR_GENERIC_H +#define __UM_PROCESSOR_GENERIC_H + +struct pt_regs; + +struct task_struct; + +#include <asm/ptrace.h> +#include <sysdep/archsetjmp.h> + +#include <linux/prefetch.h> + +#include <asm/cpufeatures.h> + +struct mm_struct; + +struct thread_struct { + struct pt_regs regs; + struct pt_regs *segv_regs; + int singlestep_syscall; + void *fault_addr; + jmp_buf *fault_catcher; + struct task_struct *prev_sched; + struct arch_thread arch; + jmp_buf switch_buf; + struct { + int op; + union { + struct { + int pid; + } fork, exec; + struct { + int (*proc)(void *); + void *arg; + } thread; + struct { + void (*proc)(void *); + void *arg; + } cb; + } u; + } request; +}; + +#define INIT_THREAD \ +{ \ + .regs = EMPTY_REGS, \ + .fault_addr = NULL, \ + .prev_sched = NULL, \ + .arch = INIT_ARCH_THREAD, \ + .request = { 0 } \ +} + +/* + * User space process size: 3GB (default). + */ +extern unsigned long task_size; + +#define TASK_SIZE (task_size) + +#undef STACK_TOP +#undef STACK_TOP_MAX + +extern unsigned long stacksizelim; + +#define STACK_ROOM (stacksizelim) +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE) +#define STACK_TOP_MAX STACK_TOP + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
+ */ +#define TASK_UNMAPPED_BASE (0x40000000) + +extern void start_thread(struct pt_regs *regs, unsigned long entry, + unsigned long stack); + +struct cpuinfo_um { + unsigned long loops_per_jiffy; + int ipi_pipe[2]; + int cache_alignment; + union { + __u32 x86_capability[NCAPINTS + NBUGINTS]; + unsigned long x86_capability_alignment; + }; +}; + +extern struct cpuinfo_um boot_cpu_data; + +#define cpu_data(cpu) boot_cpu_data +#define current_cpu_data boot_cpu_data +#define cache_line_size() (boot_cpu_data.cache_alignment) + +extern unsigned long get_thread_reg(int reg, jmp_buf *buf); +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf) +extern unsigned long __get_wchan(struct task_struct *p); + +#endif diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h new file mode 100644 index 0000000000..adf91ef553 --- /dev/null +++ b/arch/um/include/asm/ptrace-generic.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __UM_PTRACE_GENERIC_H +#define __UM_PTRACE_GENERIC_H + +#ifndef __ASSEMBLY__ + +#include <sysdep/ptrace.h> + +struct pt_regs { + struct uml_pt_regs regs; +}; + +#define arch_has_single_step() (1) + +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS } + +#define PT_REGS_IP(r) UPT_IP(&(r)->regs) +#define PT_REGS_SP(r) UPT_SP(&(r)->regs) + +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs) + +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs) + +#define instruction_pointer(regs) PT_REGS_IP(regs) + +#define PTRACE_OLDSETOPTIONS 21 + +struct task_struct; + +extern long subarch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern unsigned long getreg(struct task_struct *child, int regno); +extern int putreg(struct task_struct *child, int regno, unsigned long value); + +extern int arch_set_tls(struct task_struct *new, unsigned long tls); +extern void clear_flushed_tls(struct task_struct *task); +extern int syscall_trace_enter(struct pt_regs *regs); +extern void syscall_trace_leave(struct pt_regs *regs); + +#endif + +#endif diff --git a/arch/um/include/asm/sections.h b/arch/um/include/asm/sections.h new file mode 100644 index 0000000000..a3c1fb6ed6 --- /dev/null +++ b/arch/um/include/asm/sections.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_SECTIONS_H +#define __UM_SECTIONS_H + +#include <asm-generic/sections.h> + +extern char __binary_start[]; +extern char __syscall_stub_start[], __syscall_stub_end[]; + +#endif diff --git a/arch/um/include/asm/setup.h b/arch/um/include/asm/setup.h new file mode 100644 index 0000000000..80ada899f2 --- /dev/null +++ b/arch/um/include/asm/setup.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef SETUP_H_INCLUDED +#define SETUP_H_INCLUDED + +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the + * command line, so this choice is ok. 
+ */ + +#define COMMAND_LINE_SIZE 4096 + +#endif /* SETUP_H_INCLUDED */ diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h new file mode 100644 index 0000000000..a8cc1d46dd --- /dev/null +++ b/arch/um/include/asm/smp.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_SMP_H +#define __UM_SMP_H + +#define hard_smp_processor_id() 0 + +#endif diff --git a/arch/um/include/asm/stacktrace.h b/arch/um/include/asm/stacktrace.h new file mode 100644 index 0000000000..436b55952c --- /dev/null +++ b/arch/um/include/asm/stacktrace.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_UML_STACKTRACE_H +#define _ASM_UML_STACKTRACE_H + +#include <linux/uaccess.h> +#include <linux/ptrace.h> + +struct stack_frame { + struct stack_frame *next_frame; + unsigned long return_address; +}; + +struct stacktrace_ops { + void (*address)(void *data, unsigned long address, int reliable); +}; + +#ifdef CONFIG_FRAME_POINTER +static inline unsigned long +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) +{ + if (!task || task == current) + return segv_regs ? PT_REGS_BP(segv_regs) : current_bp(); + return KSTK_EBP(task); +} +#else +static inline unsigned long +get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs) +{ + return 0; +} +#endif + +static inline unsigned long +*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs) +{ + if (!task || task == current) + return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp(); + return (unsigned long *)KSTK_ESP(task); +} + +void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data); + +#endif /* _ASM_UML_STACKTRACE_H */ diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h new file mode 100644 index 0000000000..172b74143c --- /dev/null +++ b/arch/um/include/asm/syscall-generic.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Access to user system call parameters and results + * + * See asm-generic/syscall.h for function descriptions. + * + * Copyright (C) 2015 Mickaël Salaün <mic@digikod.net> + */ + +#ifndef __UM_SYSCALL_GENERIC_H +#define __UM_SYSCALL_GENERIC_H + +#include <asm/ptrace.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <sysdep/ptrace.h> + +static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + + return PT_REGS_SYSCALL_NR(regs); +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* do nothing */ +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + const long error = regs_return_value(regs); + + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs_return_value(regs); +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + PT_REGS_SET_SYSCALL_RETURN(regs, (long) error ?: val); +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + const struct uml_pt_regs *r = ®s->regs; + + *args++ = UPT_SYSCALL_ARG1(r); + *args++ = UPT_SYSCALL_ARG2(r); + *args++ = UPT_SYSCALL_ARG3(r); + *args++ = UPT_SYSCALL_ARG4(r); + *args++ = UPT_SYSCALL_ARG5(r); + *args = UPT_SYSCALL_ARG6(r); +} + +/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. 
*/ + +#endif /* __UM_SYSCALL_GENERIC_H */ diff --git a/arch/um/include/asm/sysrq.h b/arch/um/include/asm/sysrq.h new file mode 100644 index 0000000000..8fc8c65cd3 --- /dev/null +++ b/arch/um/include/asm/sysrq.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_SYSRQ_H +#define __UM_SYSRQ_H + +struct task_struct; +extern void show_trace(struct task_struct* task, unsigned long *stack); + +#endif diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h new file mode 100644 index 0000000000..c7b4b49826 --- /dev/null +++ b/arch/um/include/asm/thread_info.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __UM_THREAD_INFO_H +#define __UM_THREAD_INFO_H + +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE) + +#ifndef __ASSEMBLY__ + +#include <asm/types.h> +#include <asm/page.h> +#include <asm/segment.h> +#include <sysdep/ptrace_user.h> + +struct thread_info { + struct task_struct *task; /* main task structure */ + unsigned long flags; /* low level flags */ + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ + struct thread_info *real_thread; /* Points to non-IRQ stack */ + unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore + them out-of-band */ +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .real_thread = NULL, \ +} + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + unsigned long mask = THREAD_SIZE - 1; + void *p; + + asm volatile ("" : "=r" (p) : "0" (&ti)); + ti = (struct thread_info *) (((unsigned long)p) & ~mask); + return ti; +} + +#endif + +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */ +#define TIF_RESTART_BLOCK 4 +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_SYSCALL_AUDIT 6 +#define TIF_RESTORE_SIGMASK 7 +#define TIF_NOTIFY_RESUME 8 +#define TIF_SECCOMP 9 /* secure computing */ +#define TIF_SINGLESTEP 10 /* single stepping userspace */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_MEMDIE (1 << TIF_MEMDIE) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) + +#endif diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h new file mode 100644 index 0000000000..9f27176adb --- /dev/null +++ b/arch/um/include/asm/timex.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_TIMEX_H +#define __UM_TIMEX_H + +#define CLOCK_TICK_RATE (HZ) + +#include <asm-generic/timex.h> + +#endif diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h new file mode 100644 index 0000000000..0422467bda --- /dev/null +++ b/arch/um/include/asm/tlb.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __UM_TLB_H +#define __UM_TLB_H + +#include <linux/mm.h> + +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> +#include <asm-generic/tlb.h> + +#endif 
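
A note on current_thread_info() in thread_info.h above: it relies on each kernel stack being a THREAD_SIZE-sized, THREAD_SIZE-aligned block with struct thread_info at its base, so masking any in-stack address with ~(THREAD_SIZE - 1) recovers that base. The userspace sketch below demonstrates only this masking trick under those alignment assumptions; FAKE_THREAD_SIZE and struct fake_thread_info are illustrative stand-ins, not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_THREAD_SIZE (4 * 4096)	/* power of two, standing in for THREAD_SIZE */

struct fake_thread_info {
	int cpu;
	unsigned long flags;
};

int main(void)
{
	/* Allocate a block aligned to its own (power-of-two) size, the way
	 * the kernel stack is, and place the info struct at its base. */
	void *stack = aligned_alloc(FAKE_THREAD_SIZE, FAKE_THREAD_SIZE);
	struct fake_thread_info *ti;

	if (!stack)
		return 1;
	ti = stack;
	ti->cpu = 0;
	ti->flags = 0x2;

	/* Take an arbitrary address "deep in the stack"... */
	char *sp = (char *)stack + FAKE_THREAD_SIZE - 128;

	/* ...and recover the base the same way current_thread_info() does:
	 * clear the low bits with ~(size - 1). */
	struct fake_thread_info *found =
		(void *)((uintptr_t)sp & ~(uintptr_t)(FAKE_THREAD_SIZE - 1));

	printf("base=%p recovered=%p flags=%#lx\n", stack, (void *)found, found->flags);
	free(stack);
	return 0;
}
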
diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h new file mode 100644 index 0000000000..a5bda89039 --- /dev/null +++ b/arch/um/include/asm/tlbflush.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) + */ + +#ifndef __UM_TLBFLUSH_H +#define __UM_TLBFLUSH_H + +#include <linux/mm.h> + +/* + * TLB flushing: + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes' TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLBs + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_kernel_vm() flushes the kernel vm area + * - flush_tlb_range(vma, start, end) flushes a range of pages + */ + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address); +extern void flush_tlb_kernel_vm(void); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void __flush_tlb_one(unsigned long addr); + +#endif diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h new file mode 100644 index 0000000000..7d9d60e41e --- /dev/null +++ b/arch/um/include/asm/uaccess.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2015 Richard Weinberger (richard@nod.at) + */ + +#ifndef __UM_UACCESS_H +#define __UM_UACCESS_H + +#include <asm/elf.h> +#include <asm/unaligned.h> + +#define __under_task_size(addr, size) \ + (((unsigned long) (addr) < TASK_SIZE) && \ + (((unsigned long) (addr) + (size)) < TASK_SIZE)) + +#define __access_ok_vsyscall(addr, size) \ + (((unsigned long) (addr) >= FIXADDR_USER_START) && \ + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \ + ((unsigned long) (addr) + (size) >= (unsigned long)(addr))) + +#define __addr_range_nowrap(addr, size) \ + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size))) + +extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); +extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __clear_user(void __user *mem, unsigned long len); +static inline int __access_ok(const void __user *ptr, unsigned long size); + +/* Teach asm-generic/uaccess.h that we have C functions for these.
*/ +#define __access_ok __access_ok +#define __clear_user __clear_user + +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +#include <asm-generic/uaccess.h> + +static inline int __access_ok(const void __user *ptr, unsigned long size) +{ + unsigned long addr = (unsigned long)ptr; + return __addr_range_nowrap(addr, size) && + (__under_task_size(addr, size) || + __access_ok_vsyscall(addr, size)); +} + +/* no pagefaults for kernel addresses in um */ +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + *((type *)dst) = get_unaligned((type *)(src)); \ + if (0) /* make sure the label looks used to the compiler */ \ + goto err_label; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + put_unaligned(*((type *)src), (type *)(dst)); \ + if (0) /* make sure the label looks used to the compiler */ \ + goto err_label; \ +} while (0) + +#endif diff --git a/arch/um/include/asm/unwind.h b/arch/um/include/asm/unwind.h new file mode 100644 index 0000000000..7ffa5437b7 --- /dev/null +++ b/arch/um/include/asm/unwind.h @@ -0,0 +1,8 @@ +#ifndef _ASM_UML_UNWIND_H +#define _ASM_UML_UNWIND_H + +static inline void +unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size) {} + +#endif /* _ASM_UML_UNWIND_H */ diff --git a/arch/um/include/asm/vmalloc.h b/arch/um/include/asm/vmalloc.h new file mode 100644 index 0000000000..9a7b9ed937 --- /dev/null +++ b/arch/um/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_UM_VMALLOC_H +#define _ASM_UM_VMALLOC_H + +#endif /* _ASM_UM_VMALLOC_H */ diff --git a/arch/um/include/asm/vmlinux.lds.h b/arch/um/include/asm/vmlinux.lds.h new file mode 100644 index 0000000000..149494ae78 --- /dev/null +++ b/arch/um/include/asm/vmlinux.lds.h @@ -0,0 +1,2 @@ +#include <asm/thread_info.h> +#include <asm-generic/vmlinux.lds.h> diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h new file mode 100644 index 0000000000..647fae200c --- /dev/null +++ b/arch/um/include/asm/xor.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_UM_XOR_H +#define _ASM_UM_XOR_H + +#ifdef CONFIG_64BIT +#undef CONFIG_X86_32 +#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_sse_pf64)) +#else +#define CONFIG_X86_32 1 +#define TT_CPU_INF_XOR_DEFAULT (AVX_SELECT(&xor_block_8regs)) +#endif + +#include <asm/cpufeature.h> +#include <../../x86/include/asm/xor.h> +#include <linux/time-internal.h> + +#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT +#undef XOR_SELECT_TEMPLATE +/* pick an arbitrary one - measuring isn't possible with inf-cpu */ +#define XOR_SELECT_TEMPLATE(x) \ + (time_travel_mode == TT_MODE_INFCPU ? TT_CPU_INF_XOR_DEFAULT : x) +#endif + +#endif |
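
A closing note on the swap-PTE encoding documented in pgtable.h above: the 5-bit swap type lives in bits 5-9, the offset starts at bit 11, and bit 10 is kept free for _PAGE_SWP_EXCLUSIVE. The standalone sketch below re-derives that bit layout to show the pack/unpack round trip is lossless; swp_entry_t and the helper functions here are local stand-ins for the kernel macros, not code from the patch.

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;	/* local stand-in */

/* Same bit layout as the __swp_* macros in pgtable.h above:
 * bits 5-9 hold the 5-bit swap type, bit 10 is _PAGE_SWP_EXCLUSIVE,
 * and the offset starts at bit 11. */
static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { ((type & 0x1f) << 5) | (offset << 11) };
}

static unsigned long swp_type(swp_entry_t e)   { return (e.val >> 5) & 0x1f; }
static unsigned long swp_offset(swp_entry_t e) { return e.val >> 11; }

int main(void)
{
	swp_entry_t e = swp_entry(3, 0x12345);

	/* The round trip must be lossless, and the packed value must not
	 * touch bit 10 (the exclusive marker) or the low PTE flag bits. */
	assert(swp_type(e) == 3);
	assert(swp_offset(e) == 0x12345);
	assert((e.val & (1UL << 10)) == 0);

	printf("entry %#lx -> type %lu offset %#lx\n",
	       e.val, swp_type(e), swp_offset(e));
	return 0;
}
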