author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/um/include/asm
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/um/include/asm')
-rw-r--r--  arch/um/include/asm/Kbuild               |  28
-rw-r--r--  arch/um/include/asm/asm-prototypes.h     |   1
-rw-r--r--  arch/um/include/asm/cache.h              |  18
-rw-r--r--  arch/um/include/asm/common.lds.S         |  99
-rw-r--r--  arch/um/include/asm/delay.h              |  30
-rw-r--r--  arch/um/include/asm/dma.h                |  11
-rw-r--r--  arch/um/include/asm/fixmap.h             |  57
-rw-r--r--  arch/um/include/asm/hardirq.h            |  24
-rw-r--r--  arch/um/include/asm/io.h                 |  18
-rw-r--r--  arch/um/include/asm/irq.h                |  37
-rw-r--r--  arch/um/include/asm/irqflags.h           |  38
-rw-r--r--  arch/um/include/asm/kmap_types.h         |  13
-rw-r--r--  arch/um/include/asm/kvm_para.h           |   1
-rw-r--r--  arch/um/include/asm/mmu.h                |  24
-rw-r--r--  arch/um/include/asm/mmu_context.h        |  78
-rw-r--r--  arch/um/include/asm/page.h               | 123
-rw-r--r--  arch/um/include/asm/pgalloc.h            |  46
-rw-r--r--  arch/um/include/asm/pgtable-2level.h     |  44
-rw-r--r--  arch/um/include/asm/pgtable-3level.h     | 109
-rw-r--r--  arch/um/include/asm/pgtable.h            | 326
-rw-r--r--  arch/um/include/asm/processor-generic.h  | 103
-rw-r--r--  arch/um/include/asm/ptrace-generic.h     |  46
-rw-r--r--  arch/um/include/asm/sections.h           |  10
-rw-r--r--  arch/um/include/asm/setup.h              |  11
-rw-r--r--  arch/um/include/asm/smp.h                |   7
-rw-r--r--  arch/um/include/asm/stacktrace.h         |  43
-rw-r--r--  arch/um/include/asm/syscall-generic.h    |  81
-rw-r--r--  arch/um/include/asm/sysrq.h              |   8
-rw-r--r--  arch/um/include/asm/thread_info.h        |  78
-rw-r--r--  arch/um/include/asm/timex.h              |   9
-rw-r--r--  arch/um/include/asm/tlb.h                |  11
-rw-r--r--  arch/um/include/asm/tlbflush.h           |  31
-rw-r--r--  arch/um/include/asm/uaccess.h            |  49
-rw-r--r--  arch/um/include/asm/unwind.h             |   8
-rw-r--r--  arch/um/include/asm/vmalloc.h            |   4
-rw-r--r--  arch/um/include/asm/vmlinux.lds.h        |   2
-rw-r--r--  arch/um/include/asm/xor.h                |   7
37 files changed, 1633 insertions(+), 0 deletions(-)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
new file mode 100644
index 000000000..1c63b260e
--- /dev/null
+++ b/arch/um/include/asm/Kbuild
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += bpf_perf_event.h
+generic-y += bug.h
+generic-y += compat.h
+generic-y += current.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += extable.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hw_irq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
+generic-y += module.lds.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += word-at-a-time.h
+generic-y += kprobes.h
diff --git a/arch/um/include/asm/asm-prototypes.h b/arch/um/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..5898a26da
--- /dev/null
+++ b/arch/um/include/asm/asm-prototypes.h
@@ -0,0 +1 @@
+#include <asm-generic/asm-prototypes.h>
diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
new file mode 100644
index 000000000..5c1562734
--- /dev/null
+++ b/arch/um/include/asm/cache.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_CACHE_H
+#define __UM_CACHE_H
+
+
+#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+#elif defined(CONFIG_UML_X86) /* 64-bit */
+# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
+#else
+/* XXX: this was taken from x86, now it's completely random. Luckily only
+ * affects SMP padding. */
+# define L1_CACHE_SHIFT 5
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
new file mode 100644
index 000000000..eca6c452a
--- /dev/null
+++ b/arch/um/include/asm/common.lds.S
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/vmlinux.lds.h>
+
+ .fini : { *(.fini) } =0x9090
+ _etext = .;
+ PROVIDE (etext = .);
+
+ . = ALIGN(4096);
+ _sdata = .;
+ PROVIDE (sdata = .);
+
+ RO_DATA(4096)
+
+ .unprotected : { *(.unprotected) }
+ . = ALIGN(4096);
+ PROVIDE (_unprotected_end = .);
+
+ . = ALIGN(4096);
+ EXCEPTION_TABLE(0)
+
+ BUG_TABLE
+
+ .uml.setup.init : {
+ __uml_setup_start = .;
+ *(.uml.setup.init)
+ __uml_setup_end = .;
+ }
+
+ .uml.help.init : {
+ __uml_help_start = .;
+ *(.uml.help.init)
+ __uml_help_end = .;
+ }
+
+ .uml.postsetup.init : {
+ __uml_postsetup_start = .;
+ *(.uml.postsetup.init)
+ __uml_postsetup_end = .;
+ }
+
+ .init.setup : {
+ INIT_SETUP(0)
+ }
+
+ PERCPU_SECTION(32)
+
+ .initcall.init : {
+ INIT_CALLS
+ }
+
+ .con_initcall.init : {
+ CON_INITCALL
+ }
+
+ .exitcall : {
+ __exitcall_begin = .;
+ *(.exitcall.exit)
+ __exitcall_end = .;
+ }
+
+ .uml.exitcall : {
+ __uml_exitcall_begin = .;
+ *(.uml.exitcall.exit)
+ __uml_exitcall_end = .;
+ }
+
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : { *(.altinstr_replacement) }
+ /* .exit.text is discarded at runtime, not link time, to deal with references
+    from .altinstructions and .eh_frame */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { *(.exit.data) }
+
+ .preinit_array : {
+ __preinit_array_start = .;
+ *(.preinit_array)
+ __preinit_array_end = .;
+ }
+ .init_array : {
+ __init_array_start = .;
+ *(.init_array)
+ __init_array_end = .;
+ }
+ .fini_array : {
+ __fini_array_start = .;
+ *(.fini_array)
+ __fini_array_end = .;
+ }
+
+ . = ALIGN(4096);
+ .init.ramfs : {
+ INIT_RAM_FS
+ }
+
diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h
new file mode 100644
index 000000000..e79b2ab6f
--- /dev/null
+++ b/arch/um/include/asm/delay.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_DELAY_H
+#define __UM_DELAY_H
+#include <asm-generic/delay.h>
+#include <linux/time-internal.h>
+
+static inline void um_ndelay(unsigned long nsecs)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) {
+ time_travel_ndelay(nsecs);
+ return;
+ }
+ ndelay(nsecs);
+}
+#undef ndelay
+#define ndelay(n) um_ndelay(n)
+
+static inline void um_udelay(unsigned long usecs)
+{
+ if (time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) {
+ time_travel_ndelay(1000 * usecs);
+ return;
+ }
+ udelay(usecs);
+}
+#undef udelay
+#define udelay(n) um_udelay(n)
+#endif /* __UM_DELAY_H */
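
A minimal sketch of how these wrappers are consumed (illustrative, not part of the commit): because the #defines above rebind the generic names, existing driver code picks up the time-travel-aware behaviour without modification. example_settle_hw() is a hypothetical caller.

	#include <asm/delay.h>

	static void example_settle_hw(void)
	{
		/* expands to um_udelay(50): accounted virtually under
		 * TT_MODE_INFCPU / TT_MODE_EXTERNAL, a real busy-wait otherwise */
		udelay(50);
	}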
diff --git a/arch/um/include/asm/dma.h b/arch/um/include/asm/dma.h
new file mode 100644
index 000000000..fdc53642c
--- /dev/null
+++ b/arch/um/include/asm/dma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_DMA_H
+#define __UM_DMA_H
+
+#include <asm/io.h>
+
+extern unsigned long uml_physmem;
+
+#define MAX_DMA_ADDRESS (uml_physmem)
+
+#endif
diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h
new file mode 100644
index 000000000..2c697a145
--- /dev/null
+++ b/arch/um/include/asm/fixmap.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_FIXMAP_H
+#define __UM_FIXMAP_H
+
+#include <asm/processor.h>
+#include <asm/kmap_types.h>
+#include <asm/archparam.h>
+#include <asm/page.h>
+#include <linux/threads.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx, phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+ __end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags);
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of memory.
+ */
+
+#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#include <asm-generic/fixmap.h>
+
+#endif
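
Derived for illustration from the definitions above: with the enum empty, __end_of_fixed_addresses is 0, so FIXADDR_SIZE is 0 and the fixmap currently occupies no pages at all.

	/*
	 *   FIXADDR_SIZE  = 0 << PAGE_SHIFT             = 0
	 *   FIXADDR_START = FIXADDR_TOP - FIXADDR_SIZE  = TASK_SIZE - 2 * PAGE_SIZE
	 */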
diff --git a/arch/um/include/asm/hardirq.h b/arch/um/include/asm/hardirq.h
new file mode 100644
index 000000000..b426796d2
--- /dev/null
+++ b/arch/um/include/asm/hardirq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_UM_HARDIRQ_H
+#define __ASM_UM_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
+
+#endif /* __ASM_UM_HARDIRQ_H */
diff --git a/arch/um/include/asm/io.h b/arch/um/include/asm/io.h
new file mode 100644
index 000000000..96f77b523
--- /dev/null
+++ b/arch/um/include/asm/io.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UM_IO_H
+#define _ASM_UM_IO_H
+
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ return (void __iomem *)(unsigned long)offset;
+}
+
+#define iounmap iounmap
+static inline void iounmap(void __iomem *addr)
+{
+}
+
+#include <asm-generic/io.h>
+
+#endif
diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
new file mode 100644
index 000000000..42c6205e2
--- /dev/null
+++ b/arch/um/include/asm/irq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_IRQ_H
+#define __UM_IRQ_H
+
+#define TIMER_IRQ 0
+#define UMN_IRQ 1
+#define CONSOLE_IRQ 2
+#define CONSOLE_WRITE_IRQ 3
+#define UBD_IRQ 4
+#define UM_ETH_IRQ 5
+#define SSL_IRQ 6
+#define SSL_WRITE_IRQ 7
+#define ACCEPT_IRQ 8
+#define MCONSOLE_IRQ 9
+#define WINCH_IRQ 10
+#define SIGIO_WRITE_IRQ 11
+#define TELNETD_IRQ 12
+#define XTERM_IRQ 13
+#define RANDOM_IRQ 14
+#define VIRTIO_IRQ 15
+
+#ifdef CONFIG_UML_NET_VECTOR
+
+#define VECTOR_BASE_IRQ (VIRTIO_IRQ + 1)
+#define VECTOR_IRQ_SPACE 8
+
+#define LAST_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ - 1)
+
+#else
+
+#define LAST_IRQ VIRTIO_IRQ
+
+#endif
+
+#define NR_IRQS (LAST_IRQ + 1)
+
+#endif
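
For concreteness, the IRQ space these macros define works out as follows; the numbers are derived from the definitions above, assuming CONFIG_UML_NET_VECTOR=y.

	/*
	 *   VECTOR_BASE_IRQ = VIRTIO_IRQ + 1                   = 16
	 *   LAST_IRQ = VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ - 1  = 8 + 16 - 1 = 23
	 *   NR_IRQS  = LAST_IRQ + 1                            = 24
	 * Without CONFIG_UML_NET_VECTOR: LAST_IRQ = 15, NR_IRQS = 16.
	 */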
diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h
new file mode 100644
index 000000000..0642ad903
--- /dev/null
+++ b/arch/um/include/asm/irqflags.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+extern int get_signals(void);
+extern int set_signals(int enable);
+extern void block_signals(void);
+extern void unblock_signals(void);
+
+#define arch_local_save_flags arch_local_save_flags
+static inline unsigned long arch_local_save_flags(void)
+{
+ return get_signals();
+}
+
+#define arch_local_irq_restore arch_local_irq_restore
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ set_signals(flags);
+}
+
+#define arch_local_irq_enable arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+ unblock_signals();
+}
+
+#define arch_local_irq_disable arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+ block_signals();
+}
+
+#define ARCH_IRQ_DISABLED 0
+
+#include <asm-generic/irqflags.h>
+
+#endif
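
A sketch of the derived interface (illustrative fragment only): asm-generic/irqflags.h builds arch_local_irq_save() and friends from the four primitives above, so a section that must not race with UML's signal-based "interrupts" follows the usual kernel pattern. example_critical_section() is a hypothetical name.

	static void example_critical_section(void)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* get_signals(), then block_signals() */
		/* ... code that must not race with signal delivery ... */
		arch_local_irq_restore(flags);	/* set_signals(flags) */
	}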
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
new file mode 100644
index 000000000..b0bd12de1
--- /dev/null
+++ b/arch/um/include/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ */
+
+#ifndef __UM_KMAP_TYPES_H
+#define __UM_KMAP_TYPES_H
+
+/* No more #include "asm/arch/kmap_types.h" ! */
+
+#define KM_TYPE_NR 14
+
+#endif
diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h
new file mode 100644
index 000000000..14fab8f0b
--- /dev/null
+++ b/arch/um/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
new file mode 100644
index 000000000..5b072aba5
--- /dev/null
+++ b/arch/um/include/asm/mmu.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __ARCH_UM_MMU_H
+#define __ARCH_UM_MMU_H
+
+#include <mm_id.h>
+#include <asm/mm_context.h>
+
+typedef struct mm_context {
+ struct mm_id id;
+ struct uml_arch_mm_context arch;
+ struct page *stub_pages[2];
+} mm_context_t;
+
+extern void __switch_mm(struct mm_id * mm_idp);
+
+/* Avoid tangled inclusion with asm/ldt.h */
+extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
+extern void free_ldt(struct mm_context *mm);
+
+#endif
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
new file mode 100644
index 000000000..17ddd4edf
--- /dev/null
+++ b/arch/um/include/asm/mmu_context.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_MMU_CONTEXT_H
+#define __UM_MMU_CONTEXT_H
+
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
+
+#include <asm/mmu.h>
+
+extern void uml_setup_stubs(struct mm_struct *mm);
+/*
+ * Needed since we do not use the asm-generic/mm_hooks.h:
+ */
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ uml_setup_stubs(mm);
+ return 0;
+}
+extern void arch_exit_mmap(struct mm_struct *mm);
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+
+/*
+ * end asm-generic/mm_hooks.h functions
+ */
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+extern void force_flush_all(void);
+
+static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
+{
+ /*
+ * This is called by fs/exec.c and sys_unshare()
+ * when the new ->mm is used for the first time.
+ */
+ __switch_mm(&new->context.id);
+ mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
+ uml_setup_stubs(new);
+ mmap_write_unlock(new);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+
+ if(prev != next){
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ if(next != &init_mm)
+ __switch_mm(&next->context.id);
+ }
+}
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
+
+extern void destroy_context(struct mm_struct *mm);
+
+#endif
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
new file mode 100644
index 000000000..95af12e82
--- /dev/null
+++ b/arch/um/include/asm/page.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ */
+
+#ifndef __UM_PAGE_H
+#define __UM_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <asm/vm-flags.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
+
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+#define pte_val(p) ((p).pte)
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte = (from).pte; })
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) \
+ ({ (p).pte = (phys) | pgprot_val(prot); })
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+typedef unsigned long long phys_t;
+
+#else
+
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+#endif
+
+#define pte_val(x) ((x).pte)
+
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ((to).pte = (from).pte)
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
+
+typedef unsigned long phys_t;
+
+#endif
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long uml_physmem;
+
+#define PAGE_OFFSET (uml_physmem)
+#define KERNELBASE PAGE_OFFSET
+
+#define __va_space (8*1024*1024)
+
+#include <mem.h>
+
+/* Cast to unsigned long before casting to void * to avoid a warning from
+ * mmap_kmem about cutting a long long down to a void *. Not sure that
+ * casting is the right thing, but 32-bit UML can't have 64-bit virtual
+ * addresses
+ */
+#define __pa(virt) to_phys((void *) (unsigned long) (virt))
+#define __va(phys) to_virt((unsigned long) (phys))
+
+#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
+#define pfn_to_phys(pfn) PFN_PHYS(pfn)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#endif /* __UM_PAGE_H */
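
A hedged sketch of the linear-map conversions above; example_linear_map_roundtrip() is a hypothetical name, and to_phys()/to_virt() come from <mem.h> as noted in the comment.

	/* Illustrative only: round-trip an address through the UML linear map. */
	static int example_linear_map_roundtrip(void *kaddr)
	{
		unsigned long phys = __pa(kaddr);	/* kernel virtual -> physical */
		unsigned long pfn = phys_to_pfn(phys);	/* physical -> page frame number */

		/* __va() inverts __pa() for any linear-map address */
		return __va(pfn_to_phys(pfn)) == kaddr;
	}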
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
new file mode 100644
index 000000000..2bbf28cf3
--- /dev/null
+++ b/arch/um/include/asm/pgalloc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
+ */
+
+#ifndef __UM_PGALLOC_H
+#define __UM_PGALLOC_H
+
+#include <linux/mm.h>
+
+#include <asm-generic/pgalloc.h>
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
+
+#define pmd_populate(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + \
+ ((unsigned long long)page_to_pfn(pte) << \
+ (unsigned long long) PAGE_SHIFT)))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+
+#define __pte_free_tlb(tlb,pte, address) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+
+#define __pmd_free_tlb(tlb, pmd, address) \
+do { \
+ pgtable_pmd_page_dtor(virt_to_page(pmd)); \
+ tlb_remove_page((tlb),virt_to_page(pmd)); \
+} while (0) \
+
+#endif
+
+#endif
+
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
new file mode 100644
index 000000000..32106d31e
--- /dev/null
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgtable.h
+ */
+
+#ifndef __UM_PGTABLE_2LEVEL_H
+#define __UM_PGTABLE_2LEVEL_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: the i386 is two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define PTRS_PER_PGD 1024
+#define FIRST_USER_ADDRESS 0UL
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+static inline int pgd_newpage(pgd_t pgd) { return 0; }
+static inline void pgd_mkuptodate(pgd_t pgd) { }
+
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+#define pte_pfn(x) phys_to_pfn(pte_val(x))
+#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
+
+#endif
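
Derived for illustration, the two-level split of a 32-bit virtual address implied by these constants:

	/*
	 *   bits 31..22  PGD index    (PGDIR_SHIFT 22, PTRS_PER_PGD 1024)
	 *   bits 21..12  PTE index    (PTRS_PER_PTE 1024)
	 *   bits 11..0   page offset  (PAGE_SHIFT 12)
	 * Each PGD entry thus maps PGDIR_SIZE = 2^22 bytes = 4 MiB, and
	 * 1024 entries cover the full 4 GiB address space.
	 */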
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
new file mode 100644
index 000000000..091bff319
--- /dev/null
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2003 PathScale Inc
+ * Derived from include/asm-i386/pgtable.h
+ */
+
+#ifndef __UM_PGTABLE_3LEVEL_H
+#define __UM_PGTABLE_3LEVEL_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
+#ifdef CONFIG_64BIT
+#define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can
+ * map
+ */
+
+#define PMD_SHIFT 21
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * entries per page directory level
+ */
+
+#define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
+#define PTRS_PER_PMD 512
+#define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
+ pgd_val(e))
+
+#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
+#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+#define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+
+#ifdef CONFIG_64BIT
+#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
+static inline int pgd_newpage(pgd_t pgd)
+{
+ return(pgd_val(pgd) & _PAGE_NEWPAGE);
+}
+
+static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+
+#ifdef CONFIG_64BIT
+#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
+static inline void pud_clear (pud_t *pud)
+{
+ set_pud(pud, __pud(_PAGE_NEWPAGE));
+}
+
+#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
+#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK))
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return phys_to_pfn(pte_val(pte));
+}
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+ phys_t phys = pfn_to_phys(page_nr);
+
+ pte_set_val(pte, phys, pgprot);
+ return pte;
+}
+
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+ return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+#endif
+
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
new file mode 100644
index 000000000..def376194
--- /dev/null
+++ b/arch/um/include/asm/pgtable.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Copyright 2003 PathScale, Inc.
+ * Derived from include/asm-i386/pgtable.h
+ */
+
+#ifndef __UM_PGTABLE_H
+#define __UM_PGTABLE_H
+
+#include <asm/fixmap.h>
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_NEWPAGE 0x002
+#define _PAGE_NEWPROT 0x004
+#define _PAGE_RW 0x020
+#define _PAGE_USER 0x040
+#define _PAGE_ACCESSED 0x080
+#define _PAGE_DIRTY 0x100
+/* If _PAGE_PRESENT is clear, we use these: */
+#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
+ pte_present gives true */
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+#include <asm/pgtable-3level.h>
+#else
+#include <asm/pgtable-2level.h>
+#endif
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* zero page used for uninitialized stuff */
+extern unsigned long *empty_zero_page;
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+
+extern unsigned long end_iomem;
+
+#define VMALLOC_OFFSET (__va_space)
+#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
+#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
+#define MODULES_LEN (MODULES_VADDR - MODULES_END)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+
+/*
+ * The i386 can't do page protection for execute, and considers execute
+ * permission the same as read permission.
+ * Also, write permissions imply read permissions. This is the closest we can
+ * get.
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+
+#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
+
+#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
+
+#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
+#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
+
+#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
+#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
+
+#define p4d_newpage(x) (p4d_val(x) & _PAGE_NEWPAGE)
+#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)
+
+#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+/*
+ * =================================
+ * Flags checking section.
+ * =================================
+ */
+
+static inline int pte_none(pte_t pte)
+{
+ return pte_is_zero(pte);
+}
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte)
+{
+ return((pte_get_bits(pte, _PAGE_USER)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
+
+static inline int pte_exec(pte_t pte){
+ return((pte_get_bits(pte, _PAGE_USER)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return((pte_get_bits(pte, _PAGE_RW)) &&
+ !(pte_get_bits(pte, _PAGE_PROTNONE)));
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_DIRTY);
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_ACCESSED);
+}
+
+static inline int pte_newpage(pte_t pte)
+{
+ return pte_get_bits(pte, _PAGE_NEWPAGE);
+}
+
+static inline int pte_newprot(pte_t pte)
+{
+ return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
+}
+
+/*
+ * =================================
+ * Flags setting section.
+ * =================================
+ */
+
+static inline pte_t pte_mknewprot(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPROT);
+ return(pte);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_DIRTY);
+ return(pte);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_ACCESSED);
+ return(pte);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ if (likely(pte_get_bits(pte, _PAGE_RW)))
+ pte_clear_bits(pte, _PAGE_RW);
+ else
+ return pte;
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+ return pte;
+ pte_set_bits(pte, _PAGE_USER);
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_DIRTY);
+ return(pte);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_ACCESSED);
+ return(pte);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+ return pte;
+ pte_set_bits(pte, _PAGE_RW);
+ return(pte_mknewprot(pte));
+}
+
+static inline pte_t pte_mkuptodate(pte_t pte)
+{
+ pte_clear_bits(pte, _PAGE_NEWPAGE);
+ if(pte_present(pte))
+ pte_clear_bits(pte, _PAGE_NEWPROT);
+ return(pte);
+}
+
+static inline pte_t pte_mknewpage(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPAGE);
+ return(pte);
+}
+
+static inline void set_pte(pte_t *pteptr, pte_t pteval)
+{
+ pte_copy(*pteptr, pteval);
+
+ /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
+ * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
+ * mapped pages.
+ */
+
+ *pteptr = pte_mknewpage(*pteptr);
+ if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *pteptr, pte_t pteval)
+{
+ set_pte(pteptr, pteval);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define __virt_to_page(virt) phys_to_page(__pa(virt))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
+
+#define mk_pte(page, pgprot) \
+ ({ pte_t pte; \
+ \
+ pte_set_val(pte, page_to_phys(page), (pgprot)); \
+ if (pte_present(pte)) \
+ pte_mknewprot(pte_mknewpage(pte)); \
+ pte;})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
+ return pte;
+}
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * pmd_page_vaddr() returns the kernel virtual address of the page table
+ * mapped by the given pmd entry
+ */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
+#define update_mmu_cache(vma,address,ptep) do ; while (0)
+
+/* Encode and de-code a swap entry */
+#define __swp_type(x) (((x).val >> 5) & 0x1f)
+#define __swp_offset(x) ((x).val >> 11)
+
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
+#define __pte_to_swp_entry(pte) \
+ ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+ __flush_tlb_one((vaddr)); \
+} while (0)
+
+#endif
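
A sketch of the swap-entry packing defined just above (illustrative fragment): the type sits in bits 9..5 and the offset in bits 11 and up, leaving bits 0-4 clear, so a swap PTE never accidentally carries _PAGE_PRESENT, _PAGE_NEWPAGE, _PAGE_NEWPROT or _PAGE_PROTNONE. example_swap_roundtrip() is a hypothetical name.

	static void example_swap_roundtrip(void)
	{
		swp_entry_t e = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */
		pte_t pte;

		BUG_ON(__swp_type(e) != 3);		/* (val >> 5) & 0x1f */
		BUG_ON(__swp_offset(e) != 0x1234);	/* val >> 11 */

		pte = __swp_entry_to_pte(e);		/* non-present PTE image */
		(void)pte;
	}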
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
new file mode 100644
index 000000000..afd9b267c
--- /dev/null
+++ b/arch/um/include/asm/processor-generic.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_PROCESSOR_GENERIC_H
+#define __UM_PROCESSOR_GENERIC_H
+
+struct pt_regs;
+
+struct task_struct;
+
+#include <asm/ptrace.h>
+#include <registers.h>
+#include <sysdep/archsetjmp.h>
+
+#include <linux/prefetch.h>
+
+struct mm_struct;
+
+struct thread_struct {
+ struct pt_regs regs;
+ struct pt_regs *segv_regs;
+ int singlestep_syscall;
+ void *fault_addr;
+ jmp_buf *fault_catcher;
+ struct task_struct *prev_sched;
+ struct arch_thread arch;
+ jmp_buf switch_buf;
+ struct {
+ int op;
+ union {
+ struct {
+ int pid;
+ } fork, exec;
+ struct {
+ int (*proc)(void *);
+ void *arg;
+ } thread;
+ struct {
+ void (*proc)(void *);
+ void *arg;
+ } cb;
+ } u;
+ } request;
+};
+
+#define INIT_THREAD \
+{ \
+ .regs = EMPTY_REGS, \
+ .fault_addr = NULL, \
+ .prev_sched = NULL, \
+ .arch = INIT_ARCH_THREAD, \
+ .request = { 0 } \
+}
+
+static inline void release_thread(struct task_struct *task)
+{
+}
+
+static inline void mm_copy_segments(struct mm_struct *from_mm,
+ struct mm_struct *new_mm)
+{
+}
+
+/*
+ * User space process size: 3GB (default).
+ */
+extern unsigned long task_size;
+
+#define TASK_SIZE (task_size)
+
+#undef STACK_TOP
+#undef STACK_TOP_MAX
+
+extern unsigned long stacksizelim;
+
+#define STACK_ROOM (stacksizelim)
+#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (0x40000000)
+
+extern void start_thread(struct pt_regs *regs, unsigned long entry,
+ unsigned long stack);
+
+struct cpuinfo_um {
+ unsigned long loops_per_jiffy;
+ int ipi_pipe[2];
+};
+
+extern struct cpuinfo_um boot_cpu_data;
+
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+
+#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
+extern unsigned long get_wchan(struct task_struct *p);
+
+#endif
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
new file mode 100644
index 000000000..adf91ef55
--- /dev/null
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_PTRACE_GENERIC_H
+#define __UM_PTRACE_GENERIC_H
+
+#ifndef __ASSEMBLY__
+
+#include <sysdep/ptrace.h>
+
+struct pt_regs {
+ struct uml_pt_regs regs;
+};
+
+#define arch_has_single_step() (1)
+
+#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
+
+#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
+#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
+
+#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
+
+#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
+
+#define instruction_pointer(regs) PT_REGS_IP(regs)
+
+#define PTRACE_OLDSETOPTIONS 21
+
+struct task_struct;
+
+extern long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+extern unsigned long getreg(struct task_struct *child, int regno);
+extern int putreg(struct task_struct *child, int regno, unsigned long value);
+
+extern int arch_set_tls(struct task_struct *new, unsigned long tls);
+extern void clear_flushed_tls(struct task_struct *task);
+extern int syscall_trace_enter(struct pt_regs *regs);
+extern void syscall_trace_leave(struct pt_regs *regs);
+
+#endif
+
+#endif
diff --git a/arch/um/include/asm/sections.h b/arch/um/include/asm/sections.h
new file mode 100644
index 000000000..a3c1fb6ed
--- /dev/null
+++ b/arch/um/include/asm/sections.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_SECTIONS_H
+#define __UM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char __binary_start[];
+extern char __syscall_stub_start[], __syscall_stub_end[];
+
+#endif
diff --git a/arch/um/include/asm/setup.h b/arch/um/include/asm/setup.h
new file mode 100644
index 000000000..80ada899f
--- /dev/null
+++ b/arch/um/include/asm/setup.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef SETUP_H_INCLUDED
+#define SETUP_H_INCLUDED
+
+/* POSIX mandates, via _POSIX_ARG_MAX, that we can rely on 4096 chars in the
+ * command line, so this choice is ok.
+ */
+
+#define COMMAND_LINE_SIZE 4096
+
+#endif /* SETUP_H_INCLUDED */
diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h
new file mode 100644
index 000000000..a8cc1d46d
--- /dev/null
+++ b/arch/um/include/asm/smp.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_SMP_H
+#define __UM_SMP_H
+
+#define hard_smp_processor_id() 0
+
+#endif
diff --git a/arch/um/include/asm/stacktrace.h b/arch/um/include/asm/stacktrace.h
new file mode 100644
index 000000000..436b55952
--- /dev/null
+++ b/arch/um/include/asm/stacktrace.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UML_STACKTRACE_H
+#define _ASM_UML_STACKTRACE_H
+
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+struct stack_frame {
+ struct stack_frame *next_frame;
+ unsigned long return_address;
+};
+
+struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+};
+
+#ifdef CONFIG_FRAME_POINTER
+static inline unsigned long
+get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ if (!task || task == current)
+ return segv_regs ? PT_REGS_BP(segv_regs) : current_bp();
+ return KSTK_EBP(task);
+}
+#else
+static inline unsigned long
+get_frame_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ return 0;
+}
+#endif
+
+static inline unsigned long
+*get_stack_pointer(struct task_struct *task, struct pt_regs *segv_regs)
+{
+ if (!task || task == current)
+ return segv_regs ? (unsigned long *)PT_REGS_SP(segv_regs) : current_sp();
+ return (unsigned long *)KSTK_ESP(task);
+}
+
+void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
+
+#endif /* _ASM_UML_STACKTRACE_H */
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h
new file mode 100644
index 000000000..2984feb9d
--- /dev/null
+++ b/arch/um/include/asm/syscall-generic.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for function descriptions.
+ *
+ * Copyright (C) 2015 Mickaël Salaün <mic@digikod.net>
+ */
+
+#ifndef __UM_SYSCALL_GENERIC_H
+#define __UM_SYSCALL_GENERIC_H
+
+#include <asm/ptrace.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <sysdep/ptrace.h>
+
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+
+ return PT_REGS_SYSCALL_NR(regs);
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ const long error = regs_return_value(regs);
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs_return_value(regs);
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ PT_REGS_SET_SYSCALL_RETURN(regs, (long) error ?: val);
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ const struct uml_pt_regs *r = &regs->regs;
+
+ *args++ = UPT_SYSCALL_ARG1(r);
+ *args++ = UPT_SYSCALL_ARG2(r);
+ *args++ = UPT_SYSCALL_ARG3(r);
+ *args++ = UPT_SYSCALL_ARG4(r);
+ *args++ = UPT_SYSCALL_ARG5(r);
+ *args = UPT_SYSCALL_ARG6(r);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ struct uml_pt_regs *r = &regs->regs;
+
+ UPT_SYSCALL_ARG1(r) = *args++;
+ UPT_SYSCALL_ARG2(r) = *args++;
+ UPT_SYSCALL_ARG3(r) = *args++;
+ UPT_SYSCALL_ARG4(r) = *args++;
+ UPT_SYSCALL_ARG5(r) = *args++;
+ UPT_SYSCALL_ARG6(r) = *args;
+}
+
+/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
+
+#endif /* __UM_SYSCALL_GENERIC_H */
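
A minimal sketch of a consumer of this API, per the asm-generic/syscall.h contract; example_trace_enter() is a hypothetical name.

	static void example_trace_enter(struct task_struct *task,
					struct pt_regs *regs)
	{
		unsigned long args[6];

		syscall_get_arguments(task, regs, args);	/* fills ARG1..ARG6 */
		pr_debug("syscall %d arg0=%lx\n",
			 syscall_get_nr(task, regs), args[0]);
	}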
diff --git a/arch/um/include/asm/sysrq.h b/arch/um/include/asm/sysrq.h
new file mode 100644
index 000000000..8fc8c65cd
--- /dev/null
+++ b/arch/um/include/asm/sysrq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_SYSRQ_H
+#define __UM_SYSRQ_H
+
+struct task_struct;
+extern void show_trace(struct task_struct* task, unsigned long *stack);
+
+#endif
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
new file mode 100644
index 000000000..e610e932c
--- /dev/null
+++ b/arch/um/include/asm/thread_info.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_THREAD_INFO_H
+#define __UM_THREAD_INFO_H
+
+#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
+#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/segment.h>
+#include <sysdep/ptrace_user.h>
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+ mm_segment_t addr_limit; /* thread address space:
+ 0-0xBFFFFFFF for user
+ 0-0xFFFFFFFF for kernel */
+ struct thread_info *real_thread; /* Points to non-IRQ stack */
+ unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore
+ them out-of-band */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .real_thread = NULL, \
+}
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ unsigned long mask = THREAD_SIZE - 1;
+ void *p;
+
+ asm volatile ("" : "=r" (p) : "0" (&ti));
+ ti = (struct thread_info *) (((unsigned long)p) & ~mask);
+ return ti;
+}
+
+#endif
+
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */
+#define TIF_RESTART_BLOCK 4
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_AUDIT 6
+#define TIF_RESTORE_SIGMASK 7
+#define TIF_NOTIFY_RESUME 8
+#define TIF_SECCOMP 9 /* secure computing */
+#define TIF_SINGLESTEP 10 /* single stepping userspace */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_MEMDIE (1 << TIF_MEMDIE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+
+#endif
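
current_thread_info() above works because kernel stacks are THREAD_SIZE-aligned: masking any on-stack address with ~(THREAD_SIZE - 1) yields the stack base, where struct thread_info lives. A worked example, assuming CONFIG_KERNEL_STACK_ORDER=2:

	/*
	 *   THREAD_SIZE = (1 << 2) * 4096 = 16384, mask = 0x3fff
	 *   on-stack address   0x62a3c7e4
	 *   & ~mask         -> 0x62a3c000  == stack base == struct thread_info
	 */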
diff --git a/arch/um/include/asm/timex.h b/arch/um/include/asm/timex.h
new file mode 100644
index 000000000..9f27176ad
--- /dev/null
+++ b/arch/um/include/asm/timex.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_TIMEX_H
+#define __UM_TIMEX_H
+
+#define CLOCK_TICK_RATE (HZ)
+
+#include <asm-generic/timex.h>
+
+#endif
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
new file mode 100644
index 000000000..ff9c62828
--- /dev/null
+++ b/arch/um/include/asm/tlb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_TLB_H
+#define __UM_TLB_H
+
+#include <linux/mm.h>
+
+#include <asm/tlbflush.h>
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/arch/um/include/asm/tlbflush.h b/arch/um/include/asm/tlbflush.h
new file mode 100644
index 000000000..a5bda8903
--- /dev/null
+++ b/arch/um/include/asm/tlbflush.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#ifndef __UM_TLBFLUSH_H
+#define __UM_TLBFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_kernel_vm() flushes the kernel vm area
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
+extern void flush_tlb_kernel_vm(void);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void __flush_tlb_one(unsigned long addr);
+
+#endif
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
new file mode 100644
index 000000000..fe66d659a
--- /dev/null
+++ b/arch/um/include/asm/uaccess.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2015 Richard Weinberger (richard@nod.at)
+ */
+
+#ifndef __UM_UACCESS_H
+#define __UM_UACCESS_H
+
+#include <asm/elf.h>
+
+#define __under_task_size(addr, size) \
+ (((unsigned long) (addr) < TASK_SIZE) && \
+ (((unsigned long) (addr) + (size)) < TASK_SIZE))
+
+#define __access_ok_vsyscall(addr, size) \
+ (((unsigned long) (addr) >= FIXADDR_USER_START) && \
+ ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
+ ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
+
+#define __addr_range_nowrap(addr, size) \
+ ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
+
+extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern long __strncpy_from_user(char *dst, const char __user *src, long count);
+extern long __strnlen_user(const void __user *str, long len);
+extern unsigned long __clear_user(void __user *mem, unsigned long len);
+static inline int __access_ok(unsigned long addr, unsigned long size);
+
+/* Teach asm-generic/uaccess.h that we have C functions for these. */
+#define __access_ok __access_ok
+#define __clear_user __clear_user
+#define __strnlen_user __strnlen_user
+#define __strncpy_from_user __strncpy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+#include <asm-generic/uaccess.h>
+
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ return __addr_range_nowrap(addr, size) &&
+ (__under_task_size(addr, size) ||
+ __access_ok_vsyscall(addr, size) ||
+ uaccess_kernel());
+}
+
+#endif
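
A sketch of how the check above gates a user access (asm-generic/uaccess.h routes access_ok() to __access_ok()); example_fetch_u32() is a hypothetical helper.

	static int example_fetch_u32(const void __user *uaddr, u32 *out)
	{
		/* wraparound test first, then TASK_SIZE / vsyscall / kernel-mode */
		if (!access_ok(uaddr, sizeof(u32)))
			return -EFAULT;
		return copy_from_user(out, uaddr, sizeof(u32)) ? -EFAULT : 0;
	}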
diff --git a/arch/um/include/asm/unwind.h b/arch/um/include/asm/unwind.h
new file mode 100644
index 000000000..7ffa5437b
--- /dev/null
+++ b/arch/um/include/asm/unwind.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_UML_UNWIND_H
+#define _ASM_UML_UNWIND_H
+
+static inline void
+unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
+ void *orc, size_t orc_size) {}
+
+#endif /* _ASM_UML_UNWIND_H */
diff --git a/arch/um/include/asm/vmalloc.h b/arch/um/include/asm/vmalloc.h
new file mode 100644
index 000000000..9a7b9ed93
--- /dev/null
+++ b/arch/um/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_UM_VMALLOC_H
+#define _ASM_UM_VMALLOC_H
+
+#endif /* _ASM_UM_VMALLOC_H */
diff --git a/arch/um/include/asm/vmlinux.lds.h b/arch/um/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000..149494ae7
--- /dev/null
+++ b/arch/um/include/asm/vmlinux.lds.h
@@ -0,0 +1,2 @@
+#include <asm/thread_info.h>
+#include <asm-generic/vmlinux.lds.h>
diff --git a/arch/um/include/asm/xor.h b/arch/um/include/asm/xor.h
new file mode 100644
index 000000000..36b33d62a
--- /dev/null
+++ b/arch/um/include/asm/xor.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/xor.h>
+#include <linux/time-internal.h>
+
+/* pick an arbitrary one - measuring isn't possible with inf-cpu */
+#define XOR_SELECT_TEMPLATE(x) \
+ (time_travel_mode == TT_MODE_INFCPU ? &xor_block_8regs : NULL)