Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild | 65
-rw-r--r--  include/asm-generic/access_ok.h | 48
-rw-r--r--  include/asm-generic/archrandom.h | 15
-rw-r--r--  include/asm-generic/asm-offsets.h | 1
-rw-r--r--  include/asm-generic/asm-prototypes.h | 14
-rw-r--r--  include/asm-generic/atomic.h | 136
-rw-r--r--  include/asm-generic/atomic64.h | 75
-rw-r--r--  include/asm-generic/audit_change_attr.h | 33
-rw-r--r--  include/asm-generic/audit_dir_write.h | 38
-rw-r--r--  include/asm-generic/audit_read.h | 14
-rw-r--r--  include/asm-generic/audit_signal.h | 3
-rw-r--r--  include/asm-generic/audit_write.h | 25
-rw-r--r--  include/asm-generic/barrier.h | 300
-rw-r--r--  include/asm-generic/bitops.h | 38
-rw-r--r--  include/asm-generic/bitops/__ffs.h | 44
-rw-r--r--  include/asm-generic/bitops/__fls.h | 44
-rw-r--r--  include/asm-generic/bitops/arch_hweight.h | 26
-rw-r--r--  include/asm-generic/bitops/atomic.h | 70
-rw-r--r--  include/asm-generic/bitops/builtin-__ffs.h | 16
-rw-r--r--  include/asm-generic/bitops/builtin-__fls.h | 16
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h | 15
-rw-r--r--  include/asm-generic/bitops/builtin-fls.h | 17
-rw-r--r--  include/asm-generic/bitops/const_hweight.h | 44
-rw-r--r--  include/asm-generic/bitops/ext2-atomic-setbit.h | 12
-rw-r--r--  include/asm-generic/bitops/ext2-atomic.h | 27
-rw-r--r--  include/asm-generic/bitops/ffs.h | 42
-rw-r--r--  include/asm-generic/bitops/ffz.h | 13
-rw-r--r--  include/asm-generic/bitops/fls.h | 42
-rw-r--r--  include/asm-generic/bitops/fls64.h | 37
-rw-r--r--  include/asm-generic/bitops/generic-non-atomic.h | 175
-rw-r--r--  include/asm-generic/bitops/hweight.h | 8
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h | 103
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h | 84
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h | 157
-rw-r--r--  include/asm-generic/bitops/le.h | 64
-rw-r--r--  include/asm-generic/bitops/lock.h | 94
-rw-r--r--  include/asm-generic/bitops/non-atomic.h | 20
-rw-r--r--  include/asm-generic/bitops/non-instrumented-non-atomic.h | 17
-rw-r--r--  include/asm-generic/bitops/sched.h | 32
-rw-r--r--  include/asm-generic/bitsperlong.h | 38
-rw-r--r--  include/asm-generic/bug.h | 225
-rw-r--r--  include/asm-generic/cache.h | 13
-rw-r--r--  include/asm-generic/cacheflush.h | 128
-rw-r--r--  include/asm-generic/checksum.h | 65
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 68
-rw-r--r--  include/asm-generic/cmpxchg.h | 115
-rw-r--r--  include/asm-generic/compat.h | 168
-rw-r--r--  include/asm-generic/current.h | 10
-rw-r--r--  include/asm-generic/delay.h | 45
-rw-r--r--  include/asm-generic/device.h | 14
-rw-r--r--  include/asm-generic/div64.h | 249
-rw-r--r--  include/asm-generic/dma-mapping.h | 10
-rw-r--r--  include/asm-generic/dma.h | 16
-rw-r--r--  include/asm-generic/early_ioremap.h | 47
-rw-r--r--  include/asm-generic/emergency-restart.h | 10
-rw-r--r--  include/asm-generic/error-injection.h | 42
-rw-r--r--  include/asm-generic/exec.h | 15
-rw-r--r--  include/asm-generic/export.h | 88
-rw-r--r--  include/asm-generic/extable.h | 27
-rw-r--r--  include/asm-generic/fb.h | 13
-rw-r--r--  include/asm-generic/fixmap.h | 104
-rw-r--r--  include/asm-generic/flat.h | 26
-rw-r--r--  include/asm-generic/ftrace.h | 13
-rw-r--r--  include/asm-generic/futex.h | 121
-rw-r--r--  include/asm-generic/getorder.h | 52
-rw-r--r--  include/asm-generic/gpio.h | 172
-rw-r--r--  include/asm-generic/hardirq.h | 26
-rw-r--r--  include/asm-generic/hugetlb.h | 160
-rw-r--r--  include/asm-generic/hw_irq.h | 9
-rw-r--r--  include/asm-generic/hyperv-tlfs.h | 793
-rw-r--r--  include/asm-generic/ide_iops.h | 39
-rw-r--r--  include/asm-generic/int-ll64.h | 47
-rw-r--r--  include/asm-generic/io.h | 1228
-rw-r--r--  include/asm-generic/ioctl.h | 18
-rw-r--r--  include/asm-generic/iomap.h | 115
-rw-r--r--  include/asm-generic/irq.h | 19
-rw-r--r--  include/asm-generic/irq_regs.h | 33
-rw-r--r--  include/asm-generic/irq_work.h | 11
-rw-r--r--  include/asm-generic/irqflags.h | 67
-rw-r--r--  include/asm-generic/kdebug.h | 10
-rw-r--r--  include/asm-generic/kmap_size.h | 12
-rw-r--r--  include/asm-generic/kprobes.h | 26
-rw-r--r--  include/asm-generic/kvm_para.h | 32
-rw-r--r--  include/asm-generic/kvm_types.h | 5
-rw-r--r--  include/asm-generic/linkage.h | 8
-rw-r--r--  include/asm-generic/local.h | 56
-rw-r--r--  include/asm-generic/local64.h | 97
-rw-r--r--  include/asm-generic/logic_io.h | 78
-rw-r--r--  include/asm-generic/mcs_spinlock.h | 13
-rw-r--r--  include/asm-generic/memory_model.h | 57
-rw-r--r--  include/asm-generic/mm_hooks.h | 31
-rw-r--r--  include/asm-generic/mmiowb.h | 65
-rw-r--r--  include/asm-generic/mmiowb_types.h | 12
-rw-r--r--  include/asm-generic/mmu.h | 20
-rw-r--r--  include/asm-generic/mmu_context.h | 76
-rw-r--r--  include/asm-generic/module.h | 49
-rw-r--r--  include/asm-generic/module.lds.h | 10
-rw-r--r--  include/asm-generic/mshyperv.h | 283
-rw-r--r--  include/asm-generic/msi.h | 41
-rw-r--r--  include/asm-generic/nommu_context.h | 19
-rw-r--r--  include/asm-generic/numa.h | 52
-rw-r--r--  include/asm-generic/page.h | 97
-rw-r--r--  include/asm-generic/param.h | 11
-rw-r--r--  include/asm-generic/parport.h | 24
-rw-r--r--  include/asm-generic/pci.h | 30
-rw-r--r--  include/asm-generic/pci_iomap.h | 60
-rw-r--r--  include/asm-generic/percpu.h | 444
-rw-r--r--  include/asm-generic/pgalloc.h | 199
-rw-r--r--  include/asm-generic/pgtable-nop4d.h | 58
-rw-r--r--  include/asm-generic/pgtable-nopmd.h | 73
-rw-r--r--  include/asm-generic/pgtable-nopud.h | 66
-rw-r--r--  include/asm-generic/pgtable_uffd.h | 66
-rw-r--r--  include/asm-generic/preempt.h | 88
-rw-r--r--  include/asm-generic/qrwlock.h | 147
-rw-r--r--  include/asm-generic/qrwlock_types.h | 34
-rw-r--r--  include/asm-generic/qspinlock.h | 150
-rw-r--r--  include/asm-generic/qspinlock_types.h | 95
-rw-r--r--  include/asm-generic/resource.h | 31
-rw-r--r--  include/asm-generic/rwonce.h | 90
-rw-r--r--  include/asm-generic/seccomp.h | 43
-rw-r--r--  include/asm-generic/sections.h | 230
-rw-r--r--  include/asm-generic/serial.h | 14
-rw-r--r--  include/asm-generic/set_memory.h | 13
-rw-r--r--  include/asm-generic/shmparam.h | 7
-rw-r--r--  include/asm-generic/signal.h | 13
-rw-r--r--  include/asm-generic/simd.h | 15
-rw-r--r--  include/asm-generic/softirq_stack.h | 14
-rw-r--r--  include/asm-generic/spinlock.h | 92
-rw-r--r--  include/asm-generic/spinlock_types.h | 17
-rw-r--r--  include/asm-generic/statfs.h | 8
-rw-r--r--  include/asm-generic/string.h | 10
-rw-r--r--  include/asm-generic/switch_to.h | 26
-rw-r--r--  include/asm-generic/syscall.h | 134
-rw-r--r--  include/asm-generic/syscalls.h | 29
-rw-r--r--  include/asm-generic/timex.h | 23
-rw-r--r--  include/asm-generic/tlb.h | 685
-rw-r--r--  include/asm-generic/tlbflush.h | 21
-rw-r--r--  include/asm-generic/topology.h | 77
-rw-r--r--  include/asm-generic/trace_clock.h | 17
-rw-r--r--  include/asm-generic/uaccess.h | 235
-rw-r--r--  include/asm-generic/unaligned.h | 155
-rw-r--r--  include/asm-generic/user.h | 8
-rw-r--r--  include/asm-generic/vdso/vsyscall.h | 29
-rw-r--r--  include/asm-generic/vermagic.h | 7
-rw-r--r--  include/asm-generic/vga.h | 25
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 1192
-rw-r--r--  include/asm-generic/vtime.h | 1
-rw-r--r--  include/asm-generic/word-at-a-time.h | 121
-rw-r--r--  include/asm-generic/xor.h | 738
149 files changed, 13297 insertions, 0 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
new file mode 100644
index 000000000..941be574b
--- /dev/null
+++ b/include/asm-generic/Kbuild
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# asm headers that all architectures except um should have
+# (This file is not included when SRCARCH=um since UML borrows several
+# asm headers from the host architecture.)
+
+mandatory-y += atomic.h
+mandatory-y += archrandom.h
+mandatory-y += barrier.h
+mandatory-y += bitops.h
+mandatory-y += bug.h
+mandatory-y += bugs.h
+mandatory-y += cacheflush.h
+mandatory-y += checksum.h
+mandatory-y += compat.h
+mandatory-y += current.h
+mandatory-y += delay.h
+mandatory-y += device.h
+mandatory-y += div64.h
+mandatory-y += dma-mapping.h
+mandatory-y += dma.h
+mandatory-y += emergency-restart.h
+mandatory-y += exec.h
+mandatory-y += fb.h
+mandatory-y += ftrace.h
+mandatory-y += futex.h
+mandatory-y += hardirq.h
+mandatory-y += hw_irq.h
+mandatory-y += io.h
+mandatory-y += irq.h
+mandatory-y += irq_regs.h
+mandatory-y += irq_work.h
+mandatory-y += kdebug.h
+mandatory-y += kmap_size.h
+mandatory-y += kprobes.h
+mandatory-y += linkage.h
+mandatory-y += local.h
+mandatory-y += local64.h
+mandatory-y += mmiowb.h
+mandatory-y += mmu.h
+mandatory-y += mmu_context.h
+mandatory-y += module.h
+mandatory-y += module.lds.h
+mandatory-y += msi.h
+mandatory-y += pci.h
+mandatory-y += percpu.h
+mandatory-y += pgalloc.h
+mandatory-y += preempt.h
+mandatory-y += rwonce.h
+mandatory-y += sections.h
+mandatory-y += serial.h
+mandatory-y += shmparam.h
+mandatory-y += simd.h
+mandatory-y += softirq_stack.h
+mandatory-y += switch_to.h
+mandatory-y += timex.h
+mandatory-y += tlbflush.h
+mandatory-y += topology.h
+mandatory-y += trace_clock.h
+mandatory-y += uaccess.h
+mandatory-y += unaligned.h
+mandatory-y += vermagic.h
+mandatory-y += vga.h
+mandatory-y += word-at-a-time.h
+mandatory-y += xor.h
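Each mandatory-y entry tells Kbuild that <asm/foo.h> must exist for every architecture; when a port does not supply its own header, the build generates a one-line wrapper under its include/generated/asm/ directory that simply pulls in the asm-generic version. A minimal sketch of such a generated wrapper (the exact path and comment are illustrative, not part of this patch):

/* arch/$(SRCARCH)/include/generated/asm/atomic.h -- generated wrapper (illustrative) */
#include <asm-generic/atomic.h>

With that wrapper in place, '#include <asm/atomic.h>' resolves even on ports that never wrote an atomic.h of their own.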
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000..2866ae61b
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+ unsigned long limit = TASK_SIZE_MAX;
+ unsigned long addr = (unsigned long)ptr;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU))
+ return true;
+
+ return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
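As a usage sketch (hypothetical caller, not part of this patch), access_ok() is the cheap range check a driver or syscall performs before doing a raw user copy; for a constant size it collapses to a single comparison against TASK_SIZE_MAX.

#include <linux/uaccess.h>

/* Hypothetical helper: validate, then copy a 32-bit value from user space. */
static int example_read_user_flags(const void __user *ubuf, u32 *out)
{
	u32 val;

	if (!access_ok(ubuf, sizeof(val)))
		return -EFAULT;

	if (__copy_from_user(&val, ubuf, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}

In practice most callers use copy_from_user(), which performs the access_ok() check internally; the open-coded form above only illustrates what the macro guards against.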
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000..3cd7f980c
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
new file mode 100644
index 000000000..2fa2bc208
--- /dev/null
+++ b/include/asm-generic/asm-prototypes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/bitops.h>
+#undef __memset
+extern void *__memset(void *, int, __kernel_size_t);
+#undef __memcpy
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+#undef __memmove
+extern void *__memmove(void *, const void *, __kernel_size_t);
+#undef memset
+extern void *memset(void *, int, __kernel_size_t);
+#undef memcpy
+extern void *memcpy(void *, const void *, __kernel_size_t);
+#undef memmove
+extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 000000000..04b8be9f1
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_ATOMIC_H
+#define __ASM_GENERIC_ATOMIC_H
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_SMP
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c c_op i; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c; \
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = (v->counter = v->counter c_op i); \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = v->counter; \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_SMP */
+
+ATOMIC_OP_RETURN(add, +)
+ATOMIC_OP_RETURN(sub, -)
+
+ATOMIC_FETCH_OP(add, +)
+ATOMIC_FETCH_OP(sub, -)
+ATOMIC_FETCH_OP(and, &)
+ATOMIC_FETCH_OP(or, |)
+ATOMIC_FETCH_OP(xor, ^)
+
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
+
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
+
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
+
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* __ASM_GENERIC_ATOMIC_H */
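To make the cmpxchg-based construction concrete, this is what ATOMIC_OP_RETURN(add, +) expands to on SMP once the preprocessor has run (hand-expanded here for illustration): the loop simply retries until no other CPU has modified the counter between the read and the compare-and-exchange.

static inline int generic_atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}

On !SMP the same operation degenerates to a plain read-modify-write inside a raw_local_irq_save()/restore() pair, which is sufficient because there is no other CPU to race with.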
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 000000000..100d24b02
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
+
+typedef struct {
+ s64 counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
+
+#define ATOMIC64_OP(op) \
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op) \
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
+
+#define ATOMIC64_FETCH_OP(op) \
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
+
+#endif /* _ASM_GENERIC_ATOMIC64_H */
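The generic_atomic64_*() helpers declared here are implemented out of line (in lib/atomic64.c) by protecting the counter with a spinlock. A minimal sketch of the idea, using a single lock for brevity where the real library hashes the counter address onto a small array of raw spinlocks (names below are illustrative):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_atomic64_lock);	/* illustrative */

s64 example_atomic64_add_return(s64 a, atomic64_t *v)
{
	unsigned long flags;
	s64 val;

	raw_spin_lock_irqsave(&example_atomic64_lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(&example_atomic64_lock, flags);

	return val;
}

This is why the header only carries declarations: the lock-based bodies live in C code rather than being inlined at every call site.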
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
new file mode 100644
index 000000000..331670807
--- /dev/null
+++ b/include/asm-generic/audit_change_attr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_chmod
+__NR_chmod,
+#endif
+__NR_fchmod,
+#ifdef __NR_chown
+__NR_chown,
+__NR_lchown,
+#endif
+#ifdef __NR_fchown
+__NR_fchown,
+#endif
+__NR_setxattr,
+__NR_lsetxattr,
+__NR_fsetxattr,
+__NR_removexattr,
+__NR_lremovexattr,
+__NR_fremovexattr,
+#ifdef __NR_fchownat
+__NR_fchownat,
+__NR_fchmodat,
+#endif
+#ifdef __NR_chown32
+__NR_chown32,
+__NR_fchown32,
+__NR_lchown32,
+#endif
+#ifdef __NR_link
+__NR_link,
+#endif
+#ifdef __NR_linkat
+__NR_linkat,
+#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
new file mode 100644
index 000000000..dd5a9dd7a
--- /dev/null
+++ b/include/asm-generic/audit_dir_write.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_rename
+__NR_rename,
+#endif
+#ifdef __NR_mkdir
+__NR_mkdir,
+#endif
+#ifdef __NR_rmdir
+__NR_rmdir,
+#endif
+#ifdef __NR_creat
+__NR_creat,
+#endif
+#ifdef __NR_link
+__NR_link,
+#endif
+#ifdef __NR_unlink
+__NR_unlink,
+#endif
+#ifdef __NR_symlink
+__NR_symlink,
+#endif
+#ifdef __NR_mknod
+__NR_mknod,
+#endif
+#ifdef __NR_mkdirat
+__NR_mkdirat,
+__NR_mknodat,
+__NR_unlinkat,
+#ifdef __NR_renameat
+__NR_renameat,
+#endif
+__NR_linkat,
+__NR_symlinkat,
+#endif
+#ifdef __NR_renameat2
+__NR_renameat2,
+#endif
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
new file mode 100644
index 000000000..7bb7b5a83
--- /dev/null
+++ b/include/asm-generic/audit_read.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_readlink
+__NR_readlink,
+#endif
+__NR_quotactl,
+__NR_listxattr,
+__NR_llistxattr,
+__NR_flistxattr,
+__NR_getxattr,
+__NR_lgetxattr,
+__NR_fgetxattr,
+#ifdef __NR_readlinkat
+__NR_readlinkat,
+#endif
diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h
new file mode 100644
index 000000000..6feab7f18
--- /dev/null
+++ b/include/asm-generic/audit_signal.h
@@ -0,0 +1,3 @@
+__NR_kill,
+__NR_tgkill,
+__NR_tkill,
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
new file mode 100644
index 000000000..f9f1d0ae1
--- /dev/null
+++ b/include/asm-generic/audit_write.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/audit_dir_write.h>
+__NR_acct,
+#ifdef __NR_swapon
+__NR_swapon,
+#endif
+__NR_quotactl,
+#ifdef __NR_truncate
+__NR_truncate,
+#endif
+#ifdef __NR_truncate64
+__NR_truncate64,
+#endif
+#ifdef __NR_ftruncate
+__NR_ftruncate,
+#endif
+#ifdef __NR_ftruncate64
+__NR_ftruncate64,
+#endif
+#ifdef __NR_bind
+__NR_bind, /* bind can affect fs object only in one way... */
+#endif
+#ifdef __NR_fallocate
+__NR_fallocate,
+#endif
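These audit_*.h headers are not normal includes: each is just a comma-separated list of syscall numbers meant to be pasted into an array initializer. Modeled on how lib/audit.c consumes them (shown here as a sketch), the audit code builds its syscall class tables like this:

/* Sketch of a consumer: the header body becomes the initializer of a
 * sentinel-terminated array of syscall numbers. */
static unsigned int write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

The #ifdef guards inside the headers exist because not every architecture defines every legacy syscall number; some newer ports only provide the *at() variants.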
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
new file mode 100644
index 000000000..961f4d88f
--- /dev/null
+++ b/include/asm-generic/barrier.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic barrier definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_BARRIER_H
+#define __ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <asm/rwonce.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
+
+/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
+ *
+ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+#ifndef mb
+#define mb() barrier()
+#endif
+
+#ifndef rmb
+#define rmb() mb()
+#endif
+
+#ifndef wmb
+#define wmb() mb()
+#endif
+
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+
+#ifndef dma_wmb
+#define dma_wmb() wmb()
+#endif
+
+#ifndef __smp_mb
+#define __smp_mb() mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb() rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb() wmb()
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_mb
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_mb
+#define smp_mb() barrier()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb() barrier()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb() barrier()
+#endif
+
+#endif /* CONFIG_SMP */
+
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic() __smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic() __smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __smp_mb(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p) \
+({ \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __smp_mb(); \
+ (typeof(*p))___p1; \
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() barrier()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() barrier()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) \
+({ \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ (typeof(*p))___p1; \
+})
+#endif
+
+#endif /* CONFIG_SMP */
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
+#define virt_load_acquire(p) __smp_load_acquire(p)
+
+/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#ifndef smp_acquire__after_ctrl_dep
+#define smp_acquire__after_ctrl_dep() smp_rmb()
+#endif
+
+/**
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using READ_ONCE() on the condition variable.
+ *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ */
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
+ typeof(ptr) __PTR = (ptr); \
+ __unqual_scalar_typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = READ_ONCE(*__PTR); \
+ if (cond_expr) \
+ break; \
+ cpu_relax(); \
+ } \
+ (typeof(*ptr))VAL; \
+})
+#endif
+
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+ __unqual_scalar_typeof(*ptr) _val; \
+ _val = smp_cond_load_relaxed(ptr, cond_expr); \
+ smp_acquire__after_ctrl_dep(); \
+ (typeof(*ptr))_val; \
+})
+#endif
+
+/*
+ * pmem_wmb() ensures that all stores whose modifications are written to
+ * persistent storage by preceding instructions have updated persistent
+ * storage before any data access or data transfer caused by subsequent
+ * instructions is initiated.
+ */
+#ifndef pmem_wmb
+#define pmem_wmb() wmb()
+#endif
+
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory accesses, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situations, such a wait hurts
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_GENERIC_BARRIER_H */
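A short usage sketch (hypothetical code, not part of this patch) showing how the acquire/release pair above is meant to be used: the producer publishes data with smp_store_release(), the consumer spins with smp_cond_load_acquire(), whose built-in VAL variable names the freshly loaded value, and the acquire ordering guarantees that data written before the release is visible after the loop.

static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;
	smp_store_release(&example_ready, 1);	/* publish the data */
}

static int example_consumer(void)
{
	smp_cond_load_acquire(&example_ready, VAL == 1);	/* spin, then acquire */
	return example_data;	/* guaranteed to observe 42 */
}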
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
new file mode 100644
index 000000000..a47b8a71d
--- /dev/null
+++ b/include/asm-generic/bitops.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITOPS_H
+#define __ASM_GENERIC_BITOPS_H
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents. They should
+ * generate reasonable code, so take a look at what your compiler spits
+ * out before rolling your own buggy implementation in assembly language.
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 000000000..39e56e1c7
--- /dev/null
+++ b/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS___FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
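A worked example of the binary search above: __ffs(0x00900000). The low 32 bits are non-zero, the low 16 bits are zero (num becomes 16 and the word shifts down to 0x90), the low byte 0x90 is non-zero, the low nibble is zero (num becomes 20 and the word shifts down to 0x9), and bit 0 of what remains is set, so the function returns 20, the index of the lowest set bit (0x00900000 has bits 20 and 23 set).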
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
new file mode 100644
index 000000000..03f721a8a
--- /dev/null
+++ b/include/asm-generic/bitops/__fls.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-1))))
+ num -= 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000..c2705e1d2
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
new file mode 100644
index 000000000..71ab4ba9c
--- /dev/null
+++ b/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
+ */
+
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+#include <asm-generic/bitops/instrumented-atomic.h>
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
new file mode 100644
index 000000000..87024da44
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ return __builtin_ctzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
new file mode 100644
index 000000000..43a5aa9af
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
new file mode 100644
index 000000000..7b1293290
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from ffz (man ffs).
+ */
+#define ffs(x) __builtin_ffs(x)
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
new file mode 100644
index 000000000..c8455cc28
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __always_inline int fls(unsigned int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+#endif
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000..149faeeee
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w) \
+ ((unsigned int) \
+ ((!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7)))))
+
+#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w) HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
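As a concrete example, for a compile-time constant such as 0xb5 (binary 10110101), hweight8(0xb5) selects __const_hweight8(), whose eight !!-tests sum to the constant 5 with no runtime cost, while a non-constant argument falls through to __arch_hweight8(). The upper-case HWEIGHT8() variant additionally trips BUILD_BUG_ON_ZERO() if its argument is not a compile-time constant, which makes it safe to use in places such as array sizes and static initializers.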
diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h
new file mode 100644
index 000000000..b041cbf0d
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic-setbit.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+
+/*
+ * Atomic bitops based version of ext2 atomic bitops
+ */
+
+#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr)
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 000000000..0cfc3180b
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+
+/*
+ * Spinlock based version of ext2 atomic bitops
+ */
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = __test_and_set_bit_le(nr, addr); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = __test_and_clear_bit_le(nr, addr); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
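A hypothetical caller (not part of this patch) showing the intended pattern: the filesystem passes in the spinlock that protects the on-disk bitmap, and the macro returns the old bit value so the caller knows whether it won the allocation.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_bitmap_lock);	/* illustrative */
static unsigned long example_bitmap[4];		/* 256-bit bitmap */

static bool example_claim_slot(unsigned int nr)
{
	/* an old value of 0 means the slot was free and is now ours */
	return !ext2_set_bit_atomic(&example_bitmap_lock, nr, example_bitmap);
}

Architectures whose test_and_set_bit_le() is already atomic select the ext2-atomic-setbit.h variant instead and skip the spinlock entirely.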
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
new file mode 100644
index 000000000..323fd5d6a
--- /dev/null
+++ b/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h
new file mode 100644
index 000000000..0d010085f
--- /dev/null
+++ b/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
new file mode 100644
index 000000000..b168bb10e
--- /dev/null
+++ b/include/asm-generic/bitops/fls.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __always_inline int fls(unsigned int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
new file mode 100644
index 000000000..866f2b230
--- /dev/null
+++ b/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
+static __always_inline int fls64(__u64 x)
+{
+ __u32 h = x >> 32;
+ if (h)
+ return fls(h) + 32;
+ return fls(x);
+}
+#elif BITS_PER_LONG == 64
+static __always_inline int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000..564a8c675
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
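A sketch of the external serialization the comments above demand (hypothetical code): the cheaper non-atomic operations are fine as long as every writer to the bitmap takes the same lock, which is the usual reason to prefer __test_and_set_bit() over its atomic counterpart inside an already-locked section.

#include <linux/spinlock.h>
#include <linux/bitops.h>

static DEFINE_SPINLOCK(example_map_lock);	/* illustrative */
static unsigned long example_map[2];		/* 128-bit ID map */

static bool example_take_id(unsigned int id)
{
	bool was_set;

	spin_lock(&example_map_lock);
	was_set = __test_and_set_bit(id, example_map);
	spin_unlock(&example_map_lock);

	return !was_set;	/* true: the ID was free and is now taken */
}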
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000..6bf1bba83
--- /dev/null
+++ b/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
new file mode 100644
index 000000000..4225a8ca9
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for atomic bit
+ * operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+#include <linux/instrumented.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
+{
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_set_bit(nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ */
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit(nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+{
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_change_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit(nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_clear_bit(nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_change_bit(nr, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
new file mode 100644
index 000000000..eb64bd4f1
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for bit
+ * locking operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
+
+#include <linux/instrumented.h>
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit_unlock(nr, addr);
+}
+
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a non-atomic operation but implies a release barrier before the
+ * memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kcsan_release();
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit_unlock(nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+ instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit_lock(nr, addr);
+}
+
+#if defined(arch_clear_bit_unlock_is_negative_byte)
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+static inline bool
+clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+ kcsan_release();
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+}
+/* Let everybody know we have it. */
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
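
For illustration only (not part of the header above), a minimal sketch of how the lock-flavoured bitops pair up to form a simple bit lock, in the spirit of bit_spin_lock(); the flag word and helper names are made up.

#include <linux/bitops.h>
#include <asm/processor.h>      /* cpu_relax() */

#define MY_LOCK_BIT     0

static unsigned long my_lock_word;

static void my_lock(void)
{
        /* Acquire semantics apply only when the returned old value was 0. */
        while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
                cpu_relax();
}

static void my_unlock(void)
{
        /* Release semantics; pairs with the acquire in my_lock(). */
        clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}
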
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
new file mode 100644
index 000000000..2b238b161
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for non-atomic
+ * bit operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch___set_bit(),
+ * arch___clear_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+
+#include <linux/instrumented.h>
+
+/**
+ * ___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___set_bit(nr, addr);
+}
+
+/**
+ * ___clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit(nr, addr);
+}
+
+/**
+ * ___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___change_bit(nr, addr);
+}
+
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+ /*
+ * We treat non-atomic read-write bitops a little specially.
+ * Given the operations here only modify a single bit, assuming
+ * non-atomicity of the writer is sufficient may be reasonable
+ * for certain usage (and follows the permissible nature of the
+ * assume-plain-writes-atomic rule):
+ * 1. report read-modify-write races -> check read;
+ * 2. do not report races with marked readers, but do report
+ * races with unmarked readers -> check "atomic" write.
+ */
+ kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ /*
+ * Use generic write instrumentation, in case other sanitizers
+ * or tools are enabled alongside KCSAN.
+ */
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
+ } else {
+ instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+ }
+}
+
+/**
+ * ___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ __instrument_read_write_bitop(nr, addr);
+ return arch___test_and_set_bit(nr, addr);
+}
+
+/**
+ * ___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ __instrument_read_write_bitop(nr, addr);
+ return arch___test_and_clear_bit(nr, addr);
+}
+
+/**
+ * ___test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ __instrument_read_write_bitop(nr, addr);
+ return arch___test_and_change_bit(nr, addr);
+}
+
+/**
+ * _test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit(nr, addr);
+}
+
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
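
For illustration only, a sketch of when the double-underscore (non-atomic) variants are appropriate: a bitmap whose updates are all serialised by one lock, so the cheaper non-atomic RMW is safe. The slot map, lock and size below are made up.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define NR_SLOTS        64

static DEFINE_SPINLOCK(slot_lock);
static unsigned long slot_map[BITS_TO_LONGS(NR_SLOTS)];

static int alloc_slot(void)
{
        int slot;

        spin_lock(&slot_lock);
        slot = find_first_zero_bit(slot_map, NR_SLOTS);
        if (slot < NR_SLOTS)
                __set_bit(slot, slot_map);      /* non-atomic is fine: lock held */
        spin_unlock(&slot_lock);

        return slot < NR_SLOTS ? slot : -ENOSPC;
}
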
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
new file mode 100644
index 000000000..d51beff60
--- /dev/null
+++ b/include/asm-generic/bitops/le.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_LE_H_
+#define _ASM_GENERIC_BITOPS_LE_H_
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+
+#define BITOP_LE_SWIZZLE 0
+
+#elif defined(__BIG_ENDIAN)
+
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#endif
+
+
+static inline int test_bit_le(int nr, const void *addr)
+{
+ return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void set_bit_le(int nr, void *addr)
+{
+ set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void clear_bit_le(int nr, void *addr)
+{
+ clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __set_bit_le(int nr, void *addr)
+{
+ __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __clear_bit_le(int nr, void *addr)
+{
+ __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_set_bit_le(int nr, void *addr)
+{
+ return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_clear_bit_le(int nr, void *addr)
+{
+ return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_set_bit_le(int nr, void *addr)
+{
+ return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_clear_bit_le(int nr, void *addr)
+{
+ return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
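
The XOR swizzle above leaves the low three bits (the bit offset within a byte) untouched and mirrors the byte index within the long, which is exactly the correction a big-endian machine needs to address a little-endian bitmap. A small stand-alone illustration of the index mapping, plain userspace C with a 64-bit big-endian configuration assumed:

#include <stdio.h>

#define BITS_PER_LONG           64
#define BITOP_LE_SWIZZLE        ((BITS_PER_LONG - 1) & ~0x7)   /* 56 here */

int main(void)
{
        /* Little-endian bit 0 is accessed as native bit 56, bit 8 as 48, ... */
        for (int nr = 0; nr < 16; nr++)
                printf("LE bit %2d -> native bit %2d\n",
                       nr, nr ^ BITOP_LE_SWIZZLE);
        return 0;
}
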
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
new file mode 100644
index 000000000..630f2f6b9
--- /dev/null
+++ b/include/asm-generic/bitops/lock.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/**
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+
+/**
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+/**
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * A weaker form of clear_bit_unlock(), as used by __bit_spin_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
+ */
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ unsigned long old;
+
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ arch_atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
+#endif
+
+#include <asm-generic/bitops/instrumented-lock.h>
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 000000000..71f8d54a5
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
+
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
+
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
+
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000..0ddc78dfc
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 000000000..86470cfce
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h> /* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+ if (b[0])
+ return __ffs(b[0]);
+ return __ffs(b[1]) + 64;
+#elif BITS_PER_LONG == 32
+ if (b[0])
+ return __ffs(b[0]);
+ if (b[1])
+ return __ffs(b[1]) + 32;
+ if (b[2])
+ return __ffs(b[2]) + 64;
+ return __ffs(b[3]) + 96;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
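
For illustration only, a sketch of the kind of caller this helper was written for: picking the first populated level out of a 100-entry priority bitmap. The structure and names below are made up.

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_PRIO     100

struct my_rq {
        DECLARE_BITMAP(prio_bitmap, MY_MAX_PRIO);
        /* one queue of tasks per priority level would follow here */
};

static int my_pick_prio(struct my_rq *rq)
{
        /* Callers guarantee at least one priority level is populated. */
        return sched_find_first_bit(rq->prio_bitmap);
}
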
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
new file mode 100644
index 000000000..1023e2a4b
--- /dev/null
+++ b/include/asm-generic/bitsperlong.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+#include <uapi/asm-generic/bitsperlong.h>
+
+
+#ifdef CONFIG_64BIT
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif /* CONFIG_64BIT */
+
+/*
+ * FIXME: The check currently breaks the x86-64 build, so it's
+ * temporarily disabled. Please fix x86-64 and re-enable it.
+ */
+#if 0 && BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+/*
+ * small_const_nbits(n) is true precisely when it is known at compile-time
+ * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows
+ * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps
+ * of size 0 are very rare, and a compile-time-known-size 0 is most likely
+ * a sign of error. They will be handled correctly by the bit/bitmap APIs,
+ * but using the out-of-line functions, so that the inline implementations
+ * can unconditionally dereference the pointer(s).
+ */
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
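
For illustration only, the typical way a bitmap helper uses small_const_nbits() to pick a single-word fast path at compile time; my_bitmap_zero() is a made-up name modelled on the real bitmap API.

#include <linux/bitops.h>
#include <linux/string.h>

static inline void my_bitmap_zero(unsigned long *dst, unsigned int nbits)
{
        if (small_const_nbits(nbits))
                *dst = 0;       /* the whole bitmap fits in one word */
        else
                memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
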
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
new file mode 100644
index 000000000..4050b191e
--- /dev/null
+++ b/include/asm-generic/bug.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
+
+#define CUT_HERE "------------[ cut here ]------------\n"
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING (1 << 0)
+#define BUGFLAG_ONCE (1 << 1)
+#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
+#define BUGFLAG_TAINT(taint) ((taint) << 8)
+#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/panic.h>
+#include <linux/printk.h>
+
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
+
+#ifdef CONFIG_BUG
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ unsigned long bug_addr;
+#else
+ signed int bug_addr_disp;
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ const char *file;
+#else
+ signed int file_disp;
+#endif
+ unsigned short line;
+#endif
+ unsigned short flags;
+};
+#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Don't use BUG() or BUG_ON() unless there's really no way out; one
+ * example might be detecting data structure corruption in the middle
+ * of an operation that can't be backed out of. If the (sub)system
+ * can somehow continue operating, perhaps with reduced functionality,
+ * it's probably not BUG-worthy.
+ *
+ * If you're tempted to BUG(), think again: is completely giving up
+ * really the *only* solution? There are usually better options, where
+ * users don't need to reboot ASAP and can mostly shut down cleanly.
+ */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
+ panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+#endif
+
+/*
+ * WARN(), WARN_ON(), WARN_ON_ONCE(), and so on can be used to report
+ * significant kernel issues that need prompt attention if they should ever
+ * appear at runtime.
+ *
+ * Do not use these macros when checking for invalid external inputs
+ * (e.g. invalid system call arguments, or invalid data coming from
+ * network/devices), and on transient conditions like ENOMEM or EAGAIN.
+ * These macros should be used for recoverable kernel issues only.
+ * For invalid external inputs, transient conditions, etc use
+ * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
+ * Do not include "BUG"/"WARNING" in format strings manually to make these
+ * conditions distinguishable from kernel issues.
+ *
+ * Use the versions with printk format strings to provide better diagnostics.
+ */
+#ifndef __WARN_FLAGS
+extern __printf(4, 5)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ instrumentation_end(); \
+ } while (0)
+#else
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ __warn_printk(arg); \
+ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ instrumentation_end(); \
+ } while (0)
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+/* used internally by panic.c */
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(TAINT_WARN, format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_TAINT(condition, taint, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(taint, format); \
+ unlikely(__ret_warn_on); \
+})
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
+#endif
+
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
+
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
+
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ no_printk(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+
+/*
+ * WARN_ON_SMP() is for cases where the warning is either
+ * meaningless on !SMP or may even cause failures.
+ * It can also be used with values that are only defined
+ * on SMP:
+ *
+ * struct foo {
+ * [...]
+ * #ifdef CONFIG_SMP
+ * int bar;
+ * #endif
+ * };
+ *
+ * void func(struct foo *zoot)
+ * {
+ * WARN_ON_SMP(!zoot->bar);
+ * }
+ *
+ * For CONFIG_SMP, WARN_ON_SMP() acts the same as WARN_ON(); for
+ * uniprocessor builds it is a no-op that evaluates to false.
+ *
+ * Hence "if (WARN_ON_SMP(x))" is taken only when CONFIG_SMP is set
+ * and x is true.
+ */
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x) WARN_ON(x)
+#else
+/*
+ * Use of ({0;}) because WARN_ON_SMP(x) may be used either as
+ * a stand alone line statement or as a condition in an if ()
+ * statement.
+ * A simple "0" would cause gcc to give a "statement has no effect"
+ * warning.
+ */
+# define WARN_ON_SMP(x) ({0;})
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
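
For illustration only, a sketch of the usage the comment above prescribes: WARN_ONCE() for a genuine kernel bug, a plain error return for invalid external state. The device structure and fields are made up.

#include <linux/bug.h>
#include <linux/errno.h>

struct my_dev {
        int state;
        int irq;
};

static int my_dev_start(struct my_dev *dev)
{
        /* Driver state machine violated: a kernel bug, warn loudly but once. */
        if (WARN_ONCE(dev->state != 0, "my_dev: start in state %d\n", dev->state))
                return -EINVAL;

        /* Bad external configuration: not a kernel bug, so no WARN. */
        if (dev->irq < 0)
                return -ENXIO;

        dev->state = 1;
        return 0;
}
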
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
new file mode 100644
index 000000000..60386e164
--- /dev/null
+++ b/include/asm-generic/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CACHE_H
+#define __ASM_GENERIC_CACHE_H
+/*
+ * 32 bytes appears to be the most common cache line size,
+ * so make that the default here. Architectures with larger
+ * cache lines need to provide their own cache.h.
+ */
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif /* __ASM_GENERIC_CACHE_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 000000000..f46258d1a
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
+
+#include <linux/instrumented.h>
+
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change, as
+ * long as the cache is indexed by physical addresses rather than by
+ * virtual addresses.
+ */
+#ifndef flush_cache_all
+static inline void flush_cache_all(void)
+{
+}
+#endif
+
+#ifndef flush_cache_mm
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_dup_mm
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_range
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_page
+static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long vmaddr,
+ unsigned long pfn)
+{
+}
+#endif
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#endif
+
+#ifndef flush_dcache_mmap_lock
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_dcache_mmap_unlock
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
+#ifndef flush_icache_page
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+}
+#endif
+
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
+ struct page *page,
+ unsigned long addr, int len)
+{
+}
+#endif
+
+#ifndef flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_vunmap
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
+ memcpy(dst, src, len); \
+ flush_icache_user_page(vma, page, vaddr, len); \
+ } while (0)
+#endif
+
+
+#ifndef copy_from_user_page
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
+#endif
+
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
new file mode 100644
index 000000000..43e18db89
--- /dev/null
+++ b/include/asm-generic/checksum.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CHECKSUM_H
+#define __ASM_GENERIC_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+#ifndef ip_fast_csum
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+
+#ifndef csum_fold
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+#endif
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header;
+ * returns a 32-bit partial checksum, not yet folded or complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* __ASM_GENERIC_CHECKSUM_H */
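
The folding step in csum_fold() is easy to check by hand; a stand-alone userspace rendition with an arbitrary 32-bit partial sum as input:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* may leave a carry in bit 16 */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold that carry back in */
        return (uint16_t)~sum;
}

int main(void)
{
        /* 0x1234 + 0x5678 = 0x68ac, no carry, one's complement = 0x9753 */
        printf("folded: 0x%04x\n", fold(0x12345678));
        return 0;
}
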
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 000000000..380cdc824
--- /dev/null
+++ b/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
+ __noreturn;
+
+/*
+ * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
+ * long parameter, supporting various types of architectures.
+ */
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long flags, prev;
+
+ /*
+ * Sanity checking, compile-time.
+ */
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ raw_local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ raw_local_irq_restore(flags);
+ return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes an u64 parameter.
+ */
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+ raw_local_irq_restore(flags);
+ return prev;
+}
+
+#endif
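
For illustration only, the usual compare-and-swap retry loop that these UP helpers end up backing; cmpxchg_local() is the generic kernel API, while the variable and function names below are made up.

#include <linux/atomic.h>

static unsigned long seen_max;

static void note_value(unsigned long val)
{
        unsigned long old;

        do {
                old = READ_ONCE(seen_max);
                if (val <= old)
                        return;
                /* Retry if another local update slipped in between. */
        } while (cmpxchg_local(&seen_max, old, val) != old);
}
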
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
new file mode 100644
index 000000000..dca441992
--- /dev/null
+++ b/include/asm-generic/cmpxchg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic UP xchg and cmpxchg using interrupt disablement. Does not
+ * support SMP.
+ */
+
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern void __generic_xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret, flags;
+
+ switch (size) {
+ case 1:
+#ifdef __xchg_u8
+ return __xchg_u8(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u8 *)ptr;
+ *(volatile u8 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u8 */
+
+ case 2:
+#ifdef __xchg_u16
+ return __xchg_u16(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u16 *)ptr;
+ *(volatile u16 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u16 */
+
+ case 4:
+#ifdef __xchg_u32
+ return __xchg_u32(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u32 *)ptr;
+ *(volatile u32 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+ case 8:
+#ifdef __xchg_u64
+ return __xchg_u64(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u64 *)ptr;
+ *(volatile u64 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+ default:
+ __generic_xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+})
+
+/*
+ * Atomic compare and exchange.
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
+})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
+#endif
+
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
+#endif
+
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
+
+#endif /* __ASM_GENERIC_CMPXCHG_H */
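
For illustration only, a typical xchg() consumer of the kind the interrupt-disabling fallback above supports on UP builds; the flag word and helper are made up.

#include <linux/atomic.h>

static unsigned long pending_flags;

static unsigned long grab_pending(void)
{
        /* Atomically take every pending flag and leave zero behind. */
        return xchg(&pending_flags, 0UL);
}
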
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
new file mode 100644
index 000000000..8392caea3
--- /dev/null
+++ b/include/asm-generic/compat.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
+
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#ifndef compat_arg_u64
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif /* compat_arg_u64 */
+
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_caddr_t;
+typedef u32 compat_aio_context_t;
+typedef u32 compat_old_sigset_t;
+
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
+#ifndef __compat_uid32_t
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+#endif
+
+#ifndef compat_mode_t
+typedef u32 compat_mode_t;
+#endif
+
+#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+#else
+typedef s64 compat_s64;
+typedef u64 compat_u64;
+#endif
+
+#ifndef _COMPAT_NSIG
+typedef u32 compat_sigset_word;
+#define _COMPAT_NSIG _NSIG
+#define _COMPAT_NSIG_BPW 32
+#endif
+
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
+
+#endif
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
new file mode 100644
index 000000000..3a2e224b9
--- /dev/null
+++ b/include/asm-generic/current.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CURRENT_H
+#define __ASM_GENERIC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+
+#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
new file mode 100644
index 000000000..e448ac614
--- /dev/null
+++ b/include/asm-generic/delay.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DELAY_H
+#define __ASM_GENERIC_DELAY_H
+
+/* Undefined functions to get compile-time errors */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
+extern void __delay(unsigned long loops);
+
+/*
+ * The weird n/20000 thing suppresses a "comparison is always false due to
+ * limited range of data type" warning with non-const 8-bit arguments.
+ */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_udelay(); \
+ else \
+ __const_udelay((n) * 0x10c7ul); \
+ } else { \
+ __udelay(n); \
+ } \
+ })
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_ndelay(); \
+ else \
+ __const_udelay((n) * 5ul); \
+ } else { \
+ __ndelay(n); \
+ } \
+ })
+
+#endif /* __ASM_GENERIC_DELAY_H */
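
Where the 0x10c7 and 0x5 multipliers come from can be checked with a few lines of plain userspace C, independent of the kernel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* ceil(2^32 / 10^6): scales microseconds into __const_udelay() units */
        uint64_t us_mult = (UINT64_C(1) << 32) / 1000000 + 1;
        /* ceil(2^32 / 10^9): the same scaling for nanoseconds */
        uint64_t ns_mult = (UINT64_C(1) << 32) / 1000000000 + 1;

        printf("udelay multiplier: %#llx (expect 0x10c7)\n",
               (unsigned long long)us_mult);
        printf("ndelay multiplier: %#llx (expect 0x5)\n",
               (unsigned long long)ns_mult);
        return 0;
}
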
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
new file mode 100644
index 000000000..974517cdf
--- /dev/null
+++ b/include/asm-generic/device.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arch specific extensions to struct device
+ */
+#ifndef _ASM_GENERIC_DEVICE_H
+#define _ASM_GENERIC_DEVICE_H
+
+struct dev_archdata {
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
new file mode 100644
index 000000000..13f5aa68a
--- /dev/null
+++ b/include/asm-generic/div64.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * Optimization for constant divisors on 32-bit machines:
+ * Copyright (C) 2006-2015 Nicolas Pitre
+ *
+ * The semantics of do_div() is, in C++ notation, observing that the name
+ * is a function-like macro and the n parameter has the semantics of a C++
+ * reference:
+ *
+ * uint32_t do_div(uint64_t &n, uint32_t base)
+ * {
+ * uint32_t remainder = n % base;
+ * n = n / base;
+ * return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+/**
+ * do_div - returns 2 values: calculate remainder and update new dividend
+ * @n: uint64_t dividend (will be updated)
+ * @base: uint32_t divisor
+ *
+ * Summary:
+ * ``uint32_t remainder = n % base;``
+ * ``n = n / base;``
+ *
+ * Return: (uint32_t)remainder
+ *
+ * NOTE: macro parameter @n is evaluated multiple times,
+ * beware of side effects!
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+
+#elif BITS_PER_LONG == 32
+
+#include <linux/log2.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications which ought to be much faster.
+ *
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+
+#define __div64_const32(n, ___b) \
+({ \
+ /* \
+ * Multiplication by reciprocal of b: n / b = n * (p / b) / p \
+ * \
+ * We rely on the fact that most of this code gets optimized \
+ * away at compile time due to constant propagation and only \
+ * a few multiplication instructions should remain. \
+ * Hence this monstrous macro (static inline doesn't always \
+ * do the trick here). \
+ */ \
+ uint64_t ___res, ___x, ___t, ___m, ___n = (n); \
+ uint32_t ___p, ___bias; \
+ \
+ /* determine MSB of b */ \
+ ___p = 1 << ilog2(___b); \
+ \
+ /* compute m = ((p << 64) + b - 1) / b */ \
+ ___m = (~0ULL / ___b) * ___p; \
+ ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \
+ \
+ /* one less than the dividend with highest result */ \
+ ___x = ~0ULL / ___b * ___b - 1; \
+ \
+ /* test our ___m with res = m * x / (p << 64) */ \
+ ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
+ ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
+ ___res += (___x & 0xffffffff) * (___m >> 32); \
+ ___t = (___res < ___t) ? (1ULL << 32) : 0; \
+ ___res = (___res >> 32) + ___t; \
+ ___res += (___m >> 32) * (___x >> 32); \
+ ___res /= ___p; \
+ \
+ /* Now sanitize and optimize what we've got. */ \
+ if (~0ULL % (___b / (___b & -___b)) == 0) { \
+ /* special case, can be simplified to ... */ \
+ ___n /= (___b & -___b); \
+ ___m = ~0ULL / (___b / (___b & -___b)); \
+ ___p = 1; \
+ ___bias = 1; \
+ } else if (___res != ___x / ___b) { \
+ /* \
+ * We can't get away without a bias to compensate \
+ * for bit truncation errors. To avoid it we'd need an \
+ * additional bit to represent m which would overflow \
+ * a 64-bit variable. \
+ * \
+ * Instead we do m = p / b and n / b = (n * m + m) / p. \
+ */ \
+ ___bias = 1; \
+ /* Compute m = (p << 64) / b */ \
+ ___m = (~0ULL / ___b) * ___p; \
+ ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
+ } else { \
+ /* \
+ * Reduce m / p, and try to clear bit 31 of m when \
+ * possible, otherwise that'll need extra overflow \
+ * handling later. \
+ */ \
+ uint32_t ___bits = -(___m & -___m); \
+ ___bits |= ___m >> 32; \
+ ___bits = (~___bits) << 1; \
+ /* \
+ * If ___bits == 0 then setting bit 31 is unavoidable. \
+ * Simply apply the maximum possible reduction in that \
+ * case. Otherwise the MSB of ___bits indicates the \
+ * best reduction we should apply. \
+ */ \
+ if (!___bits) { \
+ ___p /= (___m & -___m); \
+ ___m /= (___m & -___m); \
+ } else { \
+ ___p >>= ilog2(___bits); \
+ ___m >>= ilog2(___bits); \
+ } \
+ /* No bias needed. */ \
+ ___bias = 0; \
+ } \
+ \
+ /* \
+ * Now we have a combination of 2 conditions: \
+ * \
+ * 1) whether or not we need to apply a bias, and \
+ * \
+ * 2) whether or not there might be an overflow in the cross \
+ * product determined by (___m & ((1 << 63) | (1 << 31))). \
+ * \
+ * Select the best way to do (m_bias + m * n) / (1 << 64). \
+ * From now on there will be actual runtime code generated. \
+ */ \
+ ___res = __arch_xprod_64(___m, ___n, ___bias); \
+ \
+ ___res /= ___p; \
+})
+
+#ifndef __arch_xprod_64
+/*
+ * Default C implementation for __arch_xprod_64()
+ *
+ * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+ * Semantic: retval = ((bias ? m : 0) + m * n) >> 64
+ *
+ * The product is a 128-bit value, scaled down to 64 bits.
+ * Assuming constant propagation to optimize away unused conditional code.
+ * Architectures may provide their own optimized assembly implementation.
+ */
+static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+{
+ uint32_t m_lo = m;
+ uint32_t m_hi = m >> 32;
+ uint32_t n_lo = n;
+ uint32_t n_hi = n >> 32;
+ uint64_t res;
+ uint32_t res_lo, res_hi, tmp;
+
+ if (!bias) {
+ res = ((uint64_t)m_lo * n_lo) >> 32;
+ } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ /* there can't be any overflow here */
+ res = (m + (uint64_t)m_lo * n_lo) >> 32;
+ } else {
+ res = m + (uint64_t)m_lo * n_lo;
+ res_lo = res >> 32;
+ res_hi = (res_lo < m_hi);
+ res = res_lo | ((uint64_t)res_hi << 32);
+ }
+
+ if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ /* there can't be any overflow here */
+ res += (uint64_t)m_lo * n_hi;
+ res += (uint64_t)m_hi * n_lo;
+ res >>= 32;
+ } else {
+ res += (uint64_t)m_lo * n_hi;
+ tmp = res >> 32;
+ res += (uint64_t)m_hi * n_lo;
+ res_lo = res >> 32;
+ res_hi = (res_lo < tmp);
+ res = res_lo | ((uint64_t)res_hi << 32);
+ }
+
+ res += (uint64_t)m_hi * n_hi;
+
+ return res;
+}
+#endif
+
+#ifndef __div64_32
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+#endif
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
+ if (__builtin_constant_p(__base) && \
+ is_power_of_2(__base)) { \
+ __rem = (n) & (__base - 1); \
+ (n) >>= ilog2(__base); \
+ } else if (__builtin_constant_p(__base) && \
+ __base != 0) { \
+ uint32_t __res_lo, __n_lo = (n); \
+ (n) = __div64_const32(n, __base); \
+ /* the remainder can be computed with 32-bit regs */ \
+ __res_lo = (n); \
+ __rem = __n_lo - __res_lo * __base; \
+ } else if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else { \
+ __rem = __div64_32(&(n), __base); \
+ } \
+ __rem; \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
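
For illustration of the calling convention (dividend updated in place, remainder returned), a made-up throughput helper:

#include <asm/div64.h>
#include <linux/types.h>

static u64 bytes_per_sec(u64 bytes, u32 msecs)
{
        u64 tmp = bytes * 1000;         /* overflow ignored for the example */

        do_div(tmp, msecs);             /* tmp now holds the quotient */
        return tmp;                     /* the ignored return value was the remainder */
}
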
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000..c13f46109
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return NULL;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h
new file mode 100644
index 000000000..43d0c8af8
--- /dev/null
+++ b/include/asm-generic/dma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DMA_H
+#define __ASM_GENERIC_DMA_H
+/*
+ * This file traditionally describes the i8237 PC style DMA controller.
+ * Most architectures don't have these any more and can get the minimal
+ * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
+ *
+ * Some code relies on seeing MAX_DMA_ADDRESS though.
+ */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __ASM_GENERIC_DMA_H */
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 000000000..9d0479f50
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap_prot(resource_size_t phys_addr,
+ unsigned long size, unsigned long prot_val);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as the last step in paging_init() so the library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
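
For illustration only, the usual early-boot pattern of map, read, unmap, before the normal ioremap() machinery is available. The helper name and physical address argument are made up.

#include <asm/early_ioremap.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static u32 __init read_boot_word(phys_addr_t phys)
{
        void __iomem *va = early_ioremap(phys, sizeof(u32));
        u32 val = readl(va);

        early_iounmap(va, sizeof(u32));
        return val;
}
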
diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
new file mode 100644
index 000000000..445de38b7
--- /dev/null
+++ b/include/asm-generic/emergency-restart.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+#define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+static inline void machine_emergency_restart(void)
+{
+ machine_restart(NULL);
+}
+
+#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000..fbca56bd9
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+ EI_ETYPE_NONE, /* Dummy value for undefined case */
+ EI_ETYPE_NULL, /* Return NULL if failure */
+ EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
+ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
+ EI_ETYPE_TRUE, /* Return true if failure */
+};
+
+struct error_injection_entry {
+ unsigned long addr;
+ int etype;
+};
+
+struct pt_regs;
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist-generating macro. Use it to mark functions into which
+ * errors may be injected.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype) \
+static struct error_injection_entry __used \
+ __section("_error_injection_whitelist") \
+ _eil_addr_##fname = { \
+ .addr = (unsigned long)fname, \
+ .etype = EI_ETYPE_##_etype, \
+ }
+
+void override_function_with_return(struct pt_regs *regs);
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+
+static inline void override_function_with_return(struct pt_regs *regs) { }
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
new file mode 100644
index 000000000..f66dc71fa
--- /dev/null
+++ b/include/asm-generic/exec.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Generic process execution definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but they serve more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_EXEC_H
+#define __ASM_GENERIC_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_GENERIC_EXEC_H */
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000..5e4b1f236
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+/*
+ * This comment block is used by fixdep. Please do not remove.
+ *
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
+ */
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define KSYM_ALIGN 4
+#elif defined(CONFIG_64BIT)
+#define KSYM_ALIGN 8
+#else
+#define KSYM_ALIGN 4
+#endif
+
+.macro __put, val, name
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ .long \val - ., \name - ., 0
+#elif defined(CONFIG_64BIT)
+ .quad \val, \name, 0
+#else
+ .long \val, \name, 0
+#endif
+.endm
+
+/*
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+
+.macro ___EXPORT_SYMBOL name,val,sec
+#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
+ .section ___ksymtab\sec+\name,"a"
+ .balign KSYM_ALIGN
+__ksymtab_\name:
+ __put \val, __kstrtab_\name
+ .previous
+ .section __ksymtab_strings,"aMS",%progbits,1
+__kstrtab_\name:
+ .asciz "\name"
+ .previous
+#endif
+.endm
+
+#if defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+.macro __ksym_marker sym
+ .section ".discard.ksym","a"
+__ksym_marker_\sym:
+ .previous
+.endm
+
+#define __EXPORT_SYMBOL(sym, val, sec) \
+ __ksym_marker sym; \
+ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf) \
+ ___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled) \
+ __cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
+#define EXPORT_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
+#define EXPORT_DATA_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, name,)
+#define EXPORT_DATA_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, name,_gpl)
+
+#endif
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 000000000..f9618bd07
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
new file mode 100644
index 000000000..f9f18101e
--- /dev/null
+++ b/include/asm-generic/fb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_FB_H_
+#define __ASM_GENERIC_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 000000000..8cc7b09c1
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,104 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+#include <linux/mm_types.h>
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long ________addr; \
+ __set_fixmap(idx, phys, flags); \
+ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ ________addr; \
+})
+
+#define set_fixmap_offset(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#define set_fixmap_offset_io(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
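
As a rough illustration of the calling pattern, the sketch below maps an early console register page through a fixmap slot. FIX_EARLYCON_MEM_BASE and the backing __set_fixmap() implementation are assumed to come from the architecture's own <asm/fixmap.h>; they are not defined by this generic header.

#include <linux/types.h>
#include <asm/fixmap.h>         /* arch-provided indices and __set_fixmap() */

/* Map one uncached register page and return a pointer adjusted for the
 * sub-page offset of @phys. FIX_EARLYCON_MEM_BASE is an assumed index. */
static void __iomem *early_uart_map(phys_addr_t phys)
{
        unsigned long vaddr;

        vaddr = set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE, phys);
        return (void __iomem *)vaddr;
}

static void early_uart_unmap(void)
{
        clear_fixmap(FIX_EARLYCON_MEM_BASE);
}
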
diff --git a/include/asm-generic/flat.h b/include/asm-generic/flat.h
new file mode 100644
index 000000000..1928a3596
--- /dev/null
+++ b/include/asm-generic/flat.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FLAT_H
+#define _ASM_GENERIC_FLAT_H
+
+#include <linux/uaccess.h>
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+ u32 *addr)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+ return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+ return put_user(addr, rp);
+#endif
+}
+
+#endif /* _ASM_GENERIC_FLAT_H */
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 000000000..3a23028d6
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/asm-generic/ftrace.h
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h; the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
new file mode 100644
index 000000000..2a19215ba
--- /dev/null
+++ b/include/asm-generic/futex.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FUTEX_H
+#define _ASM_GENERIC_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#ifndef futex_atomic_cmpxchg_inatomic
+#ifndef CONFIG_SMP
+/*
+ * The following implementation is only for uniprocessor machines.
+ * It relies on preempt_disable() ensuring mutual exclusion.
+ *
+ */
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+ futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+ futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
+
+/**
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
+ * argument and comparison of the previous
+ * futex value with another constant.
+ *
+ * @op: operation to execute (FUTEX_OP_SET, FUTEX_OP_ADD, ...)
+ * @oparg: argument to the operation
+ * @oval: pointer used to return the previous value of *@uaddr
+ * @uaddr: pointer to user space address
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
+ */
+static inline int
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval, ret;
+ u32 tmp;
+
+ preempt_disable();
+
+ ret = -EFAULT;
+ if (unlikely(get_user(oldval, uaddr) != 0))
+ goto out_pagefault_enable;
+
+ ret = 0;
+ tmp = oldval;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ tmp = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ tmp += oparg;
+ break;
+ case FUTEX_OP_OR:
+ tmp |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ tmp &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ tmp ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ ret = -EFAULT;
+
+out_pagefault_enable:
+ preempt_enable();
+
+ if (ret == 0)
+ *oval = oldval;
+
+ return ret;
+}
+
+/**
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of
+ * @uaddr with newval if the current value is
+ * oldval.
+ * @uval: pointer to store content of @uaddr
+ * @uaddr: pointer to user space address
+ * @oldval: old value
+ * @newval: new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ u32 val;
+
+ preempt_disable();
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
+ return -EFAULT;
+ }
+
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
+ return -EFAULT;
+ }
+
+ *uval = val;
+ preempt_enable();
+
+ return 0;
+}
+
+#endif
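
To make the calling convention concrete, here is a small sketch that exercises the two helpers through the wrapper macros defined above; it assumes the uniprocessor fallbacks are in effect and that uaddr points at a valid user-space futex word.

#include <linux/futex.h>
#include <asm/futex.h>

static int futex_helpers_example(u32 __user *uaddr)
{
        int oldval;
        u32 prev;
        int ret;

        /* Atomically add 1 to *uaddr; the previous value comes back in oldval. */
        ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);
        if (ret)
                return ret;

        /* If the word still holds oldval + 1, replace it with 0. */
        return futex_atomic_cmpxchg_inatomic(&prev, uaddr, oldval + 1, 0);
}
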
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 000000000..f2979e3a9
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+/**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+ *
+ * Determine the allocation order of a particular sized block of memory. This
+ * is on a logarithmic scale, where:
+ *
+ * 0 -> 2^0 * PAGE_SIZE and below
+ * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
+ * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
+ * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
+ * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
+ * ...
+ *
+ * The order returned is used to find the smallest allocation granule required
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+ */
+static __always_inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_GETORDER_H */
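
A few worked values make the scale described above concrete, assuming the common 4 KiB PAGE_SIZE; the usual consumer of the result is a page allocator call, sketched below (the include lines reflect how get_order() is typically reached rather than anything defined here).

#include <linux/gfp.h>          /* alloc_pages() */
#include <asm/page.h>           /* usually pulls in get_order() */

/* With 4 KiB pages:
 *   get_order(4096)  == 0   one page
 *   get_order(4097)  == 1   rounds up to two pages
 *   get_order(8192)  == 1
 *   get_order(32768) == 3   eight pages
 */
static struct page *alloc_buffer(size_t size)
{
        return alloc_pages(GFP_KERNEL, get_order(size));
}
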
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 000000000..aea9aee1f
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/compiler.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+/* Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ *
+ * While the GPIO programming interface defines valid GPIO numbers
+ * to be in the range 0..MAX_INT, this library restricts them to the
+ * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
+ */
+
+#ifndef ARCH_NR_GPIOS
+#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#else
+#define ARCH_NR_GPIOS 512
+#endif
+#endif
+
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
+static inline bool gpio_is_valid(int number)
+{
+ return number >= 0 && number < ARCH_NR_GPIOS;
+}
+
+struct device;
+struct gpio;
+struct seq_file;
+struct module;
+struct device_node;
+struct gpio_desc;
+
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+ return gpiod_to_chip(gpio_to_desc(gpio));
+}
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+extern int gpio_request(unsigned gpio, const char *label);
+extern void gpio_free(unsigned gpio);
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
+}
+
+static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
+}
+
+
+/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
+ * the GPIO is constant and refers to some always-present controller,
+ * giving direct access to chip registers and tight bitbanging loops.
+ */
+static inline int __gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void __gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value(gpio_to_desc(gpio), value);
+}
+
+static inline int __gpio_cansleep(unsigned gpio)
+{
+ return gpiod_cansleep(gpio_to_desc(gpio));
+}
+
+static inline int __gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
+
+extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
+
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+}
+
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ return gpiod_export_link(dev, name, gpio_to_desc(gpio));
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+ gpiod_unexport(gpio_to_desc(gpio));
+}
+
+#else /* !CONFIG_GPIOLIB */
+
+#include <linux/kernel.h>
+
+static inline bool gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ might_sleep();
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ might_sleep();
+ __gpio_set_value(gpio, value);
+}
+
+#endif /* !CONFIG_GPIOLIB */
+
+#endif /* _ASM_GENERIC_GPIO_H */
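
The integer-based calls above are the legacy interface layered on top of the gpiod descriptors; a minimal consumer might look like the sketch below, where DEMO_GPIO is a made-up number used purely for illustration (new code is expected to use the gpiod_* API directly).

#include <linux/gpio.h>         /* legacy integer-based API wrapped above */

#define DEMO_GPIO       42      /* hypothetical GPIO number */

static int demo_gpio_pulse(void)
{
        int ret;

        ret = gpio_request_one(DEMO_GPIO, GPIOF_OUT_INIT_LOW, "demo-led");
        if (ret)
                return ret;

        gpio_set_value_cansleep(DEMO_GPIO, 1);  /* may sleep, e.g. I2C expander */
        gpio_set_value_cansleep(DEMO_GPIO, 0);

        gpio_free(DEMO_GPIO);
        return 0;
}
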
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
new file mode 100644
index 000000000..7317e8258
--- /dev/null
+++ b/include/asm-generic/hardirq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_HARDIRQ_H
+#define __ASM_GENERIC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+#ifdef ARCH_WANTS_NMI_IRQSTAT
+ unsigned int __nmi_count;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+
+#include <linux/irq.h>
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#endif /* __ASM_GENERIC_HARDIRQ_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 000000000..a57d667ad
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+#include <linux/swap.h>
+#include <linux/swapops.h>
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline unsigned long huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline unsigned long huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
+{
+ return pte_mkuffd_wp(pte);
+}
+
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_uffd_wp(pte);
+}
+
+static inline int huge_pte_uffd_wp(pte_t pte)
+{
+ return pte_uffd_wp(pte);
+}
+
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_clear_flush(vma, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+#endif
+
+/* Please refer to comments above pte_none_mostly() for the usage */
+static inline int huge_pte_none_mostly(pte_t pte)
+{
+ return huge_pte_none(pte) || is_pte_marker(pte);
+}
+
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return ptep_get(ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
+}
+#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
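
Since every hook above sits behind an #ifndef __HAVE_ARCH_* guard, an architecture overrides one by defining the matching symbol in its own <asm/hugetlb.h> before including this file. The fragment below sketches that pattern for a hypothetical port; the override body shown is illustrative only.

/* arch/example/include/asm/hugetlb.h (hypothetical port, sketch only) */
#ifndef _ASM_EXAMPLE_HUGETLB_H
#define _ASM_EXAMPLE_HUGETLB_H

#define __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        /* A real port might fold contiguous-PTE bits here; plain read for the sketch. */
        return ptep_get(ptep);
}

#include <asm-generic/hugetlb.h>        /* supplies everything not overridden */

#endif /* _ASM_EXAMPLE_HUGETLB_H */
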
diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h
new file mode 100644
index 000000000..89036d7b4
--- /dev/null
+++ b/include/asm-generic/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HW_IRQ_H
+#define __ASM_GENERIC_HW_IRQ_H
+/*
+ * hw_irq.h has internal declarations for the low-level interrupt
+ * controller, like the original i8259A.
+ * In general, this is not needed for new architectures.
+ */
+
+#endif /* __ASM_GENERIC_HW_IRQ_H */
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
new file mode 100644
index 000000000..b17c6eeb9
--- /dev/null
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
+ * Specification (TLFS):
+ * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+ */
+
+#ifndef _ASM_GENERIC_HYPERV_TLFS_H
+#define _ASM_GENERIC_HYPERV_TLFS_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/time64.h>
+
+/*
+ * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
+ * of 4096. These definitions are used when communicating with Hyper-V using
+ * guest physical pages and guest physical page addresses, since the guest page
+ * size may not be 4096 on all architectures.
+ */
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+
+/*
+ * Hyper-V provides two categories of flags relevant to guest VMs. The
+ * "Features" category indicates specific functionality that is available
+ * to guests on this particular instance of Hyper-V. The "Features"
+ * are presented in four groups, each of which is 32 bits. The group A
+ * and B definitions are common across architectures and are listed here.
+ * However, not all flags are relevant on all architectures.
+ *
+ * Groups C and D vary across architectures and are listed in the
+ * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
+ * on multiple architectures, but the bit positions are different so they
+ * cannot appear in the generic portion of hyperv-tlfs.h.
+ *
+ * The "Enlightenments" category provides recommendations on whether to use
+ * specific enlightenments that are available. The Enlightenments are a single
+ * group of 32 bits, but they vary across architectures and are listed in
+ * the architecture specific portion of hyperv-tlfs.h.
+ */
+
+/*
+ * Group A Features.
+ */
+
+/* VP Runtime register available */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+/* Partition Reference Counter available */
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+/* Basic SynIC register available */
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+/* Synthetic Timer registers available */
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+/* Virtual APIC assist and VP assist page registers available */
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+/* Hypercall and Guest OS ID registers available */
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+/* Access virtual processor index register available */
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+/* Virtual system reset register available */
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+/* Access statistics page registers available */
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+/* Partition reference TSC register is available */
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+/* Partition Guest IDLE register is available */
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+/* Partition local APIC and TSC frequency registers available */
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+/* AccessReenlightenmentControls privilege */
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+/* AccessTscInvariantControls privilege */
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * Group B features.
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+/*
+ * TSC page layout.
+ */
+struct ms_hyperv_tsc_page {
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these as yet
+ * unpublished guidelines.
+ *
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ *
+ *
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
+/*
+ * Crash notification flags.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+/* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* Extended capability bits */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
+#define HV_PARTITION_ID_SELF ((u64)-1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
+
+/* hypercall status code */
+#define HV_STATUS_SUCCESS 0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
+#define HV_STATUS_INVALID_ALIGNMENT 4
+#define HV_STATUS_INVALID_PARAMETER 5
+#define HV_STATUS_ACCESS_DENIED 6
+#define HV_STATUS_OPERATION_DENIED 8
+#define HV_STATUS_INSUFFICIENT_MEMORY 11
+#define HV_STATUS_INVALID_PORT_ID 17
+#define HV_STATUS_INVALID_CONNECTION_ID 18
+#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC
+ * page provide a guest VM clock with 100ns tick rate
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+#define HV_SYNIC_STIMER_COUNT (4)
+
+/* Define synthetic interrupt controller message constants. */
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/*
+ * Define hypervisor message types. Some of the message types
+ * are x86/x64 specific, but there's no good way to separate
+ * them out into the arch-specific version of hyperv-tlfs.h
+ * because C doesn't provide a way to extend enum types.
+ * Keeping them all in the arch neutral hyperv-tlfs.h seems
+ * the least messy compromise.
+ */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
+};
+
+/* Define synthetic interrupt controller message flags. */
+union hv_message_flags {
+ __u8 asu8;
+ struct {
+ __u8 msg_pending:1;
+ __u8 reserved:7;
+ } __packed;
+};
+
+/* Define port identifier type. */
+union hv_port_id {
+ __u32 asu32;
+ struct {
+ __u32 id:24;
+ __u32 reserved:8;
+ } __packed u;
+};
+
+/* Define synthetic interrupt controller message header. */
+struct hv_message_header {
+ __u32 message_type;
+ __u8 payload_size;
+ union hv_message_flags message_flags;
+ __u8 reserved[2];
+ union {
+ __u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller message format. */
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ __u32 timer_index;
+ __u32 reserved;
+ __u64 expiration_time; /* When the timer expired */
+ __u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
+
+/*
+ * Synthetic timer configuration.
+ */
+union hv_stimer_config {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 periodic:1;
+ u64 lazy:1;
+ u64 auto_enable:1;
+ u64 apic_vector:8;
+ u64 direct_mode:1;
+ u64 reserved_z0:3;
+ u64 sintx:4;
+ u64 reserved_z1:44;
+ } __packed;
+};
+
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:63;
+ } __packed;
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 masked:1;
+ u64 auto_eoi:1;
+ u64 polling:1;
+ u64 reserved2:45;
+ } __packed;
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled:1;
+ u64 preserved:11;
+ u64 base_simp_gpa:52;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled:1;
+ u64 preserved:11;
+ u64 base_siefp_gpa:52;
+ } __packed;
+};
+
+struct hv_vpset {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages:11;
+ u64 largepage:1;
+ u64 basepfn:52;
+ } page;
+ struct {
+ u64 reserved:12;
+ u64 page_size:1;
+ u64 reserved1:8;
+ u64 base_large_pfn:43;
+ };
+};
+
+/*
+ * All input flush parameters should fit in a single page. The max flush
+ * count equals the number of union hv_gpa_page_range entries that can
+ * be populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_tlb_flush {
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+/* HvGetPartitionId hypercall (output only) */
+struct hv_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory {
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+/* Not a union in Windows but useful for zeroing */
+union hv_proximity_domain_info {
+ struct {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+ };
+ u64 as_uint64;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+/* HvAddLogicalProcessor hypercall */
+struct hv_add_logical_processor_in {
+ u32 lp_index;
+ u32 apic_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+struct hv_add_logical_processor_out {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum HV_SUBNODE_TYPE
+{
+ HvSubnodeAny = 0,
+ HvSubnodeSocket = 1,
+ HvSubnodeAmdNode = 2,
+ HvSubnodeL3 = 3,
+ HvSubnodeCount = 4,
+ HvSubnodeInvalid = -1
+};
+
+/* HvCreateVp hypercall */
+struct hv_create_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ union hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+enum hv_interrupt_source {
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector:8;
+ u32 delivery_mode:3;
+ u32 destination_mode:1;
+ u32 delivery_status:1;
+ u32 interrupt_polarity:1;
+ u32 remote_irr:1;
+ u32 trigger_mode:1;
+ u32 interrupt_mask:1;
+ u32 reserved1:15;
+
+ u32 reserved2:24;
+ u32 destination_id:8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+struct hv_interrupt_entry {
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target {
+ u32 vector;
+ u32 flags;
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+
+/* HvGetVpRegisters hypercall input with variable size reg name list */
+struct hv_get_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct input {
+ u32 name0;
+ u32 name1;
+ } element[];
+} __packed;
+
+
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+/* HvSetVpRegisters hypercall with variable size reg name/value list */
+struct hv_set_vp_registers_input {
+ struct {
+ u64 partitionid;
+ u32 vpindex;
+ u8 inputvtl;
+ u8 padding[3];
+ } header;
+ struct {
+ u32 name;
+ u32 padding1;
+ u64 padding2;
+ u64 valuelow;
+ u64 valuehigh;
+ } element[];
+} __packed;
+
+enum hv_device_type {
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+typedef u16 hv_pci_rid;
+typedef u16 hv_pci_segment;
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf {
+ u16 as_uint16;
+
+ struct {
+ u8 function:3;
+ u8 device:5;
+ u8 bus;
+ };
+} __packed;
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+union hv_device_id {
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0:62;
+ u64 device_type:2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id:62;
+ u64 device_type:2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits:2;
+ u16 source_shadow:1;
+
+ u16 rsvdz0:11;
+ u16 device_type:2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3:14;
+ u16 device_type:2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count:30;
+ u32 device_type:2;
+ } acpi;
+} __packed;
+
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u64 flags;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+/*
+ * The whole argument should fit in a page to be able to pass to the hypervisor
+ * in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint {
+ u64 type:2;
+ u64 reserved:62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+#endif
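
As a small illustration of the hypercall control-word layout defined above, the sketch below assembles the 64-bit input control value for a rep hypercall and turns the returned status into an errno. The actual hypercall trampoline is architecture code and not part of this header, so only the bit manipulation is shown.

#include <linux/types.h>
#include <linux/errno.h>

/* Control word for a rep hypercall such as HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST
 * with rep_count list entries; the rep-start field is left at zero. */
static u64 hv_rep_control_example(u16 code, unsigned int rep_count)
{
        return (u64)code | ((u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET);
}

static int hv_result_to_errno_example(u64 hv_result)
{
        return (hv_result & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS ?
                0 : -EIO;
}
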
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
new file mode 100644
index 000000000..81dfa3ee5
--- /dev/null
+++ b/include/asm-generic/ide_iops.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+ while (count--) {
+ writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
new file mode 100644
index 000000000..a248545f1
--- /dev/null
+++ b/include/asm-generic/int-ll64.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * asm-generic/int-ll64.h
+ *
+ * Integer declarations for architectures which use "long long"
+ * for 64-bit types.
+ */
+#ifndef _ASM_GENERIC_INT_LL64_H
+#define _ASM_GENERIC_INT_LL64_H
+
+#include <uapi/asm-generic/int-ll64.h>
+
+
+#ifndef __ASSEMBLY__
+
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
+#else /* __ASSEMBLY__ */
+
+#define S8_C(x) x
+#define U8_C(x) x
+#define S16_C(x) x
+#define U16_C(x) x
+#define S32_C(x) x
+#define U32_C(x) x
+#define S64_C(x) x
+#define U64_C(x) x
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_INT_LL64_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
new file mode 100644
index 000000000..cde032f86
--- /dev/null
+++ b/include/asm-generic/io.h
@@ -0,0 +1,1228 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_IO_H
+#define __ASM_GENERIC_IO_H
+
+#include <asm/page.h> /* I/O is all done through memory accesses */
+#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/types.h>
+#include <linux/instruction_pointer.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
+#include <asm-generic/iomap.h>
+#endif
+
+#include <asm/mmiowb.h>
+#include <asm-generic/pci_iomap.h>
+
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar(v) rmb()
+#else
+#define __io_ar(v) barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() mmiowb_set_pending()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par(v) __io_ar(v)
+#endif
+
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
+
+/*
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
+ */
+
+#ifndef __raw_readb
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readw
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readl
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *)addr;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef __raw_writeb
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = value;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *)addr = value;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+
+#ifndef readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_);
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar(val);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_);
+ __io_br();
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_);
+ __io_br();
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_);
+ __io_br();
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ __io_ar(val);
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ return val;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
+ __io_bw();
+ __raw_writeb(value, addr);
+ __io_aw();
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
+}
+#endif
+
+#ifndef writew
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
+ __io_bw();
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
+}
+#endif
+
+#ifndef writel
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
+ __io_bw();
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
+ __io_bw();
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ __io_aw();
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+#ifndef readb_relaxed
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ log_read_mmio(8, addr, _THIS_IP_);
+ val = __raw_readb(addr);
+ log_post_read_mmio(val, 8, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifndef readw_relaxed
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ log_read_mmio(16, addr, _THIS_IP_);
+ val = __le16_to_cpu(__raw_readw(addr));
+ log_post_read_mmio(val, 16, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifndef readl_relaxed
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ log_read_mmio(32, addr, _THIS_IP_);
+ val = __le32_to_cpu(__raw_readl(addr));
+ log_post_read_mmio(val, 32, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#if defined(readq) && !defined(readq_relaxed)
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ log_read_mmio(64, addr, _THIS_IP_);
+ val = __le64_to_cpu(__raw_readq(addr));
+ log_post_read_mmio(val, 64, addr, _THIS_IP_);
+ return val;
+}
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 8, addr, _THIS_IP_);
+ __raw_writeb(value, addr);
+ log_post_write_mmio(value, 8, addr, _THIS_IP_);
+}
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 16, addr, _THIS_IP_);
+ __raw_writew(cpu_to_le16(value), addr);
+ log_post_write_mmio(value, 16, addr, _THIS_IP_);
+}
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 32, addr, _THIS_IP_);
+ __raw_writel(__cpu_to_le32(value), addr);
+ log_post_write_mmio(value, 32, addr, _THIS_IP_);
+}
+#endif
+
+#if defined(writeq) && !defined(writeq_relaxed)
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ log_write_mmio(value, 64, addr, _THIS_IP_);
+ __raw_writeq(__cpu_to_le64(value), addr);
+ log_post_write_mmio(value, 64, addr, _THIS_IP_);
+}
+#endif
+
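
In practice the weaker guarantees make the _relaxed() forms suitable for repeated data-path accesses, with one fully ordered access at the point where the device must observe everything that came before. A sketch of that idiom follows; the register offsets are hypothetical placeholders.

#define DATA_REG        0x00    /* hypothetical device register offsets */
#define DOORBELL_REG    0x04

/* Push a burst of words, then ring the doorbell with an ordered write. */
static void push_burst(void __iomem *base, const u32 *words, int n)
{
        int i;

        for (i = 0; i < n; i++)
                writel_relaxed(words[i], base + DATA_REG);

        /* writel() carries the barrier that orders the burst (and any DMA
         * buffer writes) before the doorbell becomes visible. */
        writel(1, base + DOORBELL_REG);
}
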
+/*
+ * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
+ * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
+ */
+#ifndef readsb
+#define readsb readsb
+static inline void readsb(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+
+ do {
+ u8 x = __raw_readb(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsw
+#define readsw readsw
+static inline void readsw(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+
+ do {
+ u16 x = __raw_readw(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsl
+#define readsl readsl
+static inline void readsl(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = __raw_readl(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef readsq
+#define readsq readsq
+static inline void readsq(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u64 *buf = buffer;
+
+ do {
+ u64 x = __raw_readq(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef writesb
+#define writesb writesb
+static inline void writesb(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+
+ do {
+ __raw_writeb(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesw
+#define writesw writesw
+static inline void writesw(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+
+ do {
+ __raw_writew(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesl
+#define writesl writesl
+static inline void writesl(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef writesq
+#define writesq writesq
+static inline void writesq(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u64 *buf = buffer;
+
+ do {
+ __raw_writeq(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
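
These repeat accessors are meant for FIFO-style registers, where the same address produces or consumes successive data words; a minimal sketch of draining a receive FIFO follows, with the register offset being a placeholder.

#define RX_FIFO_REG     0x10    /* hypothetical FIFO register offset */

/* Read count 32-bit words from the FIFO into buf, with no byte swapping. */
static void drain_rx_fifo(void __iomem *base, u32 *buf, unsigned int count)
{
        readsl(base + RX_FIFO_REG, buf, count);
}
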
+
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *)0)
+#endif
+
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
+
+/*
+ * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
+ * implemented on hardware that needs an additional delay for I/O accesses to
+ * take effect.
+ */
+
+#if !defined(inb) && !defined(_inb)
+#define _inb _inb
+static inline u8 _inb(unsigned long addr)
+{
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par(val);
+ return val;
+}
+#endif
+
+#if !defined(inw) && !defined(_inw)
+#define _inw _inw
+static inline u16 _inw(unsigned long addr)
+{
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
+ __io_par(val);
+ return val;
+}
+#endif
+
+#if !defined(inl) && !defined(_inl)
+#define _inl _inl
+static inline u32 _inl(unsigned long addr)
+{
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
+ __io_par(val);
+ return val;
+}
+#endif
+
+#if !defined(outb) && !defined(_outb)
+#define _outb _outb
+static inline void _outb(u8 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#if !defined(outw) && !defined(_outw)
+#define _outw _outw
+static inline void _outw(u16 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#if !defined(outl) && !defined(_outl)
+#define _outl _outl
+static inline void _outl(u32 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#include <linux/logic_pio.h>
+
+#ifndef inb
+#define inb _inb
+#endif
+
+#ifndef inw
+#define inw _inw
+#endif
+
+#ifndef inl
+#define inl _inl
+#endif
+
+#ifndef outb
+#define outb _outb
+#endif
+
+#ifndef outw
+#define outw _outw
+#endif
+
+#ifndef outl
+#define outl _outl
+#endif
+
+#ifndef inb_p
+#define inb_p inb_p
+static inline u8 inb_p(unsigned long addr)
+{
+ return inb(addr);
+}
+#endif
+
+#ifndef inw_p
+#define inw_p inw_p
+static inline u16 inw_p(unsigned long addr)
+{
+ return inw(addr);
+}
+#endif
+
+#ifndef inl_p
+#define inl_p inl_p
+static inline u32 inl_p(unsigned long addr)
+{
+ return inl(addr);
+}
+#endif
+
+#ifndef outb_p
+#define outb_p outb_p
+static inline void outb_p(u8 value, unsigned long addr)
+{
+ outb(value, addr);
+}
+#endif
+
+#ifndef outw_p
+#define outw_p outw_p
+static inline void outw_p(u16 value, unsigned long addr)
+{
+ outw(value, addr);
+}
+#endif
+
+#ifndef outl_p
+#define outl_p outl_p
+static inline void outl_p(u32 value, unsigned long addr)
+{
+ outl(value, addr);
+}
+#endif
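
On machines with a legacy I/O port space these map to real port instructions; elsewhere they become MMIO relative to PCI_IOBASE. The classic CMOS/RTC index-data pair is a convenient illustration, though the port numbers are x86 convention rather than anything defined here.

#define RTC_INDEX_PORT  0x70    /* x86 convention, for illustration only */
#define RTC_DATA_PORT   0x71

static u8 cmos_read_example(u8 reg)
{
        outb(reg, RTC_INDEX_PORT);      /* select the CMOS register */
        return inb(RTC_DATA_PORT);      /* read its value */
}
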
+
+/*
+ * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
+ * single I/O port multiple times.
+ */
+
+#ifndef insb
+#define insb insb
+static inline void insb(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insw
+#define insw insw
+static inline void insw(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insl
+#define insl insl
+static inline void insl(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsb
+#define outsb outsb
+static inline void outsb(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsw
+#define outsw outsw
+static inline void outsw(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsl
+#define outsl outsl
+static inline void outsl(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insb_p
+#define insb_p insb_p
+static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insb(addr, buffer, count);
+}
+#endif
+
+#ifndef insw_p
+#define insw_p insw_p
+static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insw(addr, buffer, count);
+}
+#endif
+
+#ifndef insl_p
+#define insl_p insl_p
+static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insl(addr, buffer, count);
+}
+#endif
+
+#ifndef outsb_p
+#define outsb_p outsb_p
+static inline void outsb_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsb(addr, buffer, count);
+}
+#endif
+
+#ifndef outsw_p
+#define outsw_p outsw_p
+static inline void outsw_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsw(addr, buffer, count);
+}
+#endif
+
+#ifndef outsl_p
+#define outsl_p outsl_p
+static inline void outsl_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsl(addr, buffer, count);
+}
+#endif
+
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioread8
+#define ioread8 ioread8
+static inline u8 ioread8(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+#endif
+
+#ifndef ioread16
+#define ioread16 ioread16
+static inline u16 ioread16(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+#endif
+
+#ifndef ioread32
+#define ioread32 ioread32
+static inline u32 ioread32(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64
+#define ioread64 ioread64
+static inline u64 ioread64(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite8
+#define iowrite8 iowrite8
+static inline void iowrite8(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+#endif
+
+#ifndef iowrite16
+#define iowrite16 iowrite16
+static inline void iowrite16(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+#endif
+
+#ifndef iowrite32
+#define iowrite32 iowrite32
+static inline void iowrite32(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64
+#define iowrite64 iowrite64
+static inline void iowrite64(u64 value, volatile void __iomem *addr)
+{
+ writeq(value, addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef ioread16be
+#define ioread16be ioread16be
+static inline u16 ioread16be(const volatile void __iomem *addr)
+{
+ return swab16(readw(addr));
+}
+#endif
+
+#ifndef ioread32be
+#define ioread32be ioread32be
+static inline u32 ioread32be(const volatile void __iomem *addr)
+{
+ return swab32(readl(addr));
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64be
+#define ioread64be ioread64be
+static inline u64 ioread64be(const volatile void __iomem *addr)
+{
+ return swab64(readq(addr));
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite16be
+#define iowrite16be iowrite16be
+static inline void iowrite16be(u16 value, volatile void __iomem *addr)
+{
+ writew(swab16(value), addr);
+}
+#endif
+
+#ifndef iowrite32be
+#define iowrite32be iowrite32be
+static inline void iowrite32be(u32 value, volatile void __iomem *addr)
+{
+ writel(swab32(value), addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64be
+#define iowrite64be iowrite64be
+static inline void iowrite64be(u64 value, volatile void __iomem *addr)
+{
+ writeq(swab64(value), addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef ioread8_rep
+#define ioread8_rep ioread8_rep
+static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ readsb(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread16_rep
+#define ioread16_rep ioread16_rep
+static inline void ioread16_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsw(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread32_rep
+#define ioread32_rep ioread32_rep
+static inline void ioread32_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsl(addr, buffer, count);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64_rep
+#define ioread64_rep ioread64_rep
+static inline void ioread64_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite8_rep
+#define iowrite8_rep iowrite8_rep
+static inline void iowrite8_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesb(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite16_rep
+#define iowrite16_rep iowrite16_rep
+static inline void iowrite16_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesw(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite32_rep
+#define iowrite32_rep iowrite32_rep
+static inline void iowrite32_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesl(addr, buffer, count);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64_rep
+#define iowrite64_rep iowrite64_rep
+static inline void iowrite64_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_GENERIC_IOMAP */
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+#define __io_virt(x) ((void __force *)(x))
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ * These are pretty trivial.
+ */
+#ifndef virt_to_phys
+#define virt_to_phys virt_to_phys
+static inline unsigned long virt_to_phys(volatile void *address)
+{
+ return __pa((unsigned long)address);
+}
+#endif
+
+#ifndef phys_to_virt
+#define phys_to_virt phys_to_virt
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+#endif
+
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * Architectures with an MMU are expected to provide ioremap() and iounmap()
+ * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
+ * a default no-op implementation that expects the physical addresses used
+ * for MMIO to already be marked as uncached and usable as kernel virtual
+ * addresses.
+ *
+ * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
+ * for specific drivers if the architecture chooses to implement them. If they
+ * are not implemented we fall back to plain ioremap. Conversely, ioremap_np()
+ * can provide stricter non-posted write semantics if the architecture
+ * implements them.
+ */
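+
+/*
+ * Example only: typical driver usage of the interfaces documented above,
+ * regardless of which implementation the architecture provides. The
+ * resource "res" and the CTRL_REG offset are assumptions for illustration:
+ *
+ *	void __iomem *regs = ioremap(res->start, resource_size(res));
+ *
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(1, regs + CTRL_REG);
+ *	iounmap(regs);
+ */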
+#ifndef CONFIG_MMU
+#ifndef ioremap
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ return (void __iomem *)(unsigned long)offset;
+}
+#endif
+
+#ifndef iounmap
+#define iounmap iounmap
+static inline void iounmap(volatile void __iomem *addr)
+{
+}
+#endif
+#elif defined(CONFIG_GENERIC_IOREMAP)
+#include <linux/pgtable.h>
+
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
+ * ioremap_allowed() returns a bool,
+ * - true means continue to remap
+ * - false means skip the remap and return directly
+ * iounmap_allowed() returns a bool,
+ * - true means continue to vunmap
+ * - false means skip the vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+ return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+ return true;
+}
+#endif
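+
+/*
+ * Sketch only: an architecture selecting GENERIC_IOREMAP could override the
+ * hook above to refuse mappings it considers unsafe, e.g.:
+ *
+ *	#define ioremap_allowed ioremap_allowed
+ *	static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+ *					   unsigned long prot)
+ *	{
+ *		return !range_overlaps_secure_window(phys_addr, size);
+ *	}
+ *
+ * where range_overlaps_secure_window() is a hypothetical arch helper, used
+ * here purely for illustration.
+ */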
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
+void iounmap(volatile void __iomem *addr);
+
+static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
+{
+ /* _PAGE_IOREMAP needs to be supplied by the architecture */
+ return ioremap_prot(addr, size, _PAGE_IOREMAP);
+}
+#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
+
+#ifndef ioremap_wc
+#define ioremap_wc ioremap
+#endif
+
+#ifndef ioremap_wt
+#define ioremap_wt ioremap
+#endif
+
+/*
+ * ioremap_uc is special in that we do require an explicit architecture
+ * implementation. In general you should not use this function in a
+ * driver; use plain ioremap(), which is uncached by default. Similarly,
+ * architectures should not implement it unless they have a very good
+ * reason.
+ */
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+/*
+ * ioremap_np needs an explicit architecture implementation, as it
+ * requests stronger semantics than regular ioremap(). Portable drivers
+ * should instead use one of the higher-level abstractions, like
+ * devm_ioremap_resource(), to choose the correct variant for any given
+ * device and bus. Portable drivers with a good reason to want non-posted
+ * write semantics should always provide an ioremap() fallback in case
+ * ioremap_np() is not available.
+ */
+#ifndef ioremap_np
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
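+
+/*
+ * Example only: the fallback pattern recommended above, as it might appear
+ * in a driver that prefers non-posted writes but still works without them
+ * ("res" is an assumed struct resource pointer for illustration):
+ *
+ *	void __iomem *regs = ioremap_np(res->start, resource_size(res));
+ *
+ *	if (!regs)
+ *		regs = ioremap(res->start, resource_size(res));
+ */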
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioport_map
+#define ioport_map ioport_map
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ port &= IO_SPACE_LIMIT;
+ return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
+}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
+#endif
+
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+#endif
+#else /* CONFIG_GENERIC_IOMAP */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *p);
+#endif /* CONFIG_GENERIC_IOMAP */
+#endif /* CONFIG_HAS_IOPORT_MAP */
+
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef pci_iounmap
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
+#endif
+
+#ifndef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
+{
+ return __va(addr);
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
+#endif
+
+#ifndef memset_io
+#define memset_io memset_io
+/**
+ * memset_io - Set a range of I/O memory to a constant value
+ * @addr: The beginning of the I/O-memory range to set
+ * @value: The value to set the memory to
+ * @size: The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
+static inline void memset_io(volatile void __iomem *addr, int value,
+ size_t size)
+{
+ memset(__io_virt(addr), value, size);
+}
+#endif
+
+#ifndef memcpy_fromio
+#define memcpy_fromio memcpy_fromio
+/**
+ * memcpy_fromio - Copy a block of data from I/O memory
+ * @buffer: The (RAM) destination for the copy
+ * @addr: The (I/O memory) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
+static inline void memcpy_fromio(void *buffer,
+ const volatile void __iomem *addr,
+ size_t size)
+{
+ memcpy(buffer, __io_virt(addr), size);
+}
+#endif
+
+#ifndef memcpy_toio
+#define memcpy_toio memcpy_toio
+/**
+ * memcpy_toio - Copy a block of data into I/O memory
+ * @addr: The (I/O memory) destination for the copy
+ * @buffer: The (RAM) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
+static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ memcpy(__io_virt(addr), buffer, size);
+}
+#endif
+
+extern int devmem_is_allowed(unsigned long pfn);
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_GENERIC_IO_H */
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
new file mode 100644
index 000000000..9fda9ed00
--- /dev/null
+++ b/include/asm-generic/ioctl.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+#include <uapi/asm-generic/ioctl.h>
+
+#ifdef __CHECKER__
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#else
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+ sizeof(t) : __invalid_size_argument_for_IOC)
+#endif
+
+#endif /* _ASM_GENERIC_IOCTL_H */
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
new file mode 100644
index 000000000..08237ae8b
--- /dev/null
+++ b/include/asm-generic/iomap.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+#include <asm/byteorder.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or an MMIO access; these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
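+
+/*
+ * Example only: a PCI driver typically obtains such a cookie with
+ * pci_iomap() and then uses the accessors below without caring whether
+ * BAR 0 is memory- or I/O-mapped (the register offsets are made up):
+ *
+ *	void __iomem *base = pci_iomap(pdev, 0, 0);
+ *	u32 status;
+ *
+ *	if (!base)
+ *		return -ENOMEM;
+ *	iowrite32(1, base + 0x04);
+ *	status = ioread32(base + 0x08);
+ *	pci_iounmap(pdev, base);
+ */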
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
+#ifdef CONFIG_64BIT
+extern u64 ioread64(const void __iomem *);
+extern u64 ioread64be(const void __iomem *);
+#endif
+
+#ifdef readq
+#define ioread64_lo_hi ioread64_lo_hi
+#define ioread64_hi_lo ioread64_hi_lo
+#define ioread64be_lo_hi ioread64be_lo_hi
+#define ioread64be_hi_lo ioread64be_hi_lo
+extern u64 ioread64_lo_hi(const void __iomem *addr);
+extern u64 ioread64_hi_lo(const void __iomem *addr);
+extern u64 ioread64be_lo_hi(const void __iomem *addr);
+extern u64 ioread64be_hi_lo(const void __iomem *addr);
+#endif
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
+#ifdef CONFIG_64BIT
+extern void iowrite64(u64, void __iomem *);
+extern void iowrite64be(u64, void __iomem *);
+#endif
+
+#ifdef writeq
+#define iowrite64_lo_hi iowrite64_lo_hi
+#define iowrite64_hi_lo iowrite64_hi_lo
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
+#endif
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
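+
+/*
+ * Example only: reading a 32-bit wide data FIFO through an iomap cookie,
+ * with "base", FIFO_OFFSET and the word count assumed for illustration:
+ *
+ *	u32 buf[16];
+ *
+ *	ioread32_rep(base + FIFO_OFFSET, buf, ARRAY_SIZE(buf));
+ */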
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+#endif
+
+#ifndef ARCH_HAS_IOREMAP_WC
+#define ioremap_wc ioremap
+#endif
+
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap
+#endif
+
+#ifndef ARCH_HAS_IOREMAP_NP
+/* See the comment in asm-generic/io.h about ioremap_np(). */
+#define ioremap_np ioremap_np
+static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+#include <asm-generic/pci_iomap.h>
+
+#endif
diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h
new file mode 100644
index 000000000..da21de991
--- /dev/null
+++ b/include/asm-generic/irq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_IRQ_H
+#define __ASM_GENERIC_IRQ_H
+
+/*
+ * NR_IRQS is the upper bound of how many interrupts can be handled
+ * in the platform. It is used to size the static irq_map array,
+ * so don't make it too big.
+ */
+#ifndef NR_IRQS
+#define NR_IRQS 64
+#endif
+
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
+#endif /* __ASM_GENERIC_IRQ_H */
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
new file mode 100644
index 000000000..2e7c6e89d
--- /dev/null
+++ b/include/asm-generic/irq_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Fallback per-CPU frame pointer holder
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _ASM_GENERIC_IRQ_REGS_H
+#define _ASM_GENERIC_IRQ_REGS_H
+
+#include <linux/percpu.h>
+
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack
+ */
+DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return __this_cpu_read(__irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = __this_cpu_read(__irq_regs);
+ __this_cpu_write(__irq_regs, new_regs);
+ return old_regs;
+}
+
+#endif /* _ASM_GENERIC_IRQ_REGS_H */
diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h
new file mode 100644
index 000000000..d5dce06f7
--- /dev/null
+++ b/include/asm-generic/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+
+#endif /* __ASM_IRQ_WORK_H */
+
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
new file mode 100644
index 000000000..19ccbf483
--- /dev/null
+++ b/include/asm-generic/irqflags.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_IRQFLAGS_H
+#define __ASM_GENERIC_IRQFLAGS_H
+
+/*
+ * All architectures should implement at least the first two functions;
+ * usually inline assembly is the best way.
+ */
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
+#endif
+
+/* read interrupt enabled status */
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
+#endif
+
+/* set interrupt enabled status */
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
+#endif
+
+/* get status and disable interrupts */
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(ARCH_IRQ_DISABLED);
+ return flags;
+}
+#endif
+
+/* test flags */
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return flags == ARCH_IRQ_DISABLED;
+}
+#endif
+
+/* unconditionally enable interrupts */
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+ arch_local_irq_restore(ARCH_IRQ_ENABLED);
+}
+#endif
+
+/* unconditionally disable interrupts */
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_restore(ARCH_IRQ_DISABLED);
+}
+#endif
+
+/* test hardware interrupt enable bit */
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+#endif
+
+#endif /* __ASM_GENERIC_IRQFLAGS_H */
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
new file mode 100644
index 000000000..2b10b31b0
--- /dev/null
+++ b/include/asm-generic/kdebug.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KDEBUG_H
+#define _ASM_GENERIC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_OOPS = 1,
+};
+
+#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/kmap_size.h b/include/asm-generic/kmap_size.h
new file mode 100644
index 000000000..6e36b2443
--- /dev/null
+++ b/include/asm-generic/kmap_size.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KMAP_SIZE_H
+#define _ASM_GENERIC_KMAP_SIZE_H
+
+/* For debug this provides guard pages between the maps */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL
+# define KM_MAX_IDX 33
+#else
+# define KM_MAX_IDX 16
+#endif
+
+#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
new file mode 100644
index 000000000..060eab094
--- /dev/null
+++ b/include/asm-generic/kprobes.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KPROBES_H
+#define _ASM_GENERIC_KPROBES_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#ifdef CONFIG_KPROBES
+/*
+ * Blacklist generating macro. Use this macro to specify functions which
+ * must not be probed.
+ */
+# define __NOKPROBE_SYMBOL(fname) \
+static unsigned long __used \
+ __section("_kprobe_blacklist") \
+ _kbl_addr_##fname = (unsigned long)fname;
+# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
+/* Use this to forbid kprobes from attaching to very low level functions */
+# define __kprobes __section(".kprobes.text")
+# define nokprobe_inline __always_inline
+#else
+# define NOKPROBE_SYMBOL(fname)
+# define __kprobes
+# define nokprobe_inline inline
+#endif
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+
+#endif /* _ASM_GENERIC_KPROBES_H */
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 000000000..728e5c570
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#include <uapi/asm-generic/kvm_para.h>
+
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
+#endif
diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h
new file mode 100644
index 000000000..2a82daf11
--- /dev/null
+++ b/include/asm-generic/kvm_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_TYPES_H
+#define _ASM_GENERIC_KVM_TYPES_H
+
+#endif
diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h
new file mode 100644
index 000000000..fef7a01e5
--- /dev/null
+++ b/include/asm-generic/linkage.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_LINKAGE_H
+#define __ASM_GENERIC_LINKAGE_H
+/*
+ * linux/linkage.h provides reasonable defaults.
+ * an architecture can override them by providing its own version.
+ */
+
+#endif /* __ASM_GENERIC_LINKAGE_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 000000000..fca7f1d84
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t and is thus
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
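+
+/*
+ * Example only: the usual pattern is a per-CPU counter that is cheap to
+ * update locally and summed over all CPUs when read (the names below are
+ * assumptions for illustration; see Documentation/core-api/local_ops.rst):
+ *
+ *	static DEFINE_PER_CPU(local_t, hits);
+ *
+ *	local_inc(this_cpu_ptr(&hits));
+ *	for_each_online_cpu(cpu)
+ *		total += local_read(per_cpu_ptr(&hits, cpu));
+ */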
+
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set((&(l)->a),(i))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) local_set((l), local_read(l) + 1)
+#define __local_dec(l) local_set((l), local_read(l) - 1)
+#define __local_add(i,l) local_set((l), local_read(l) + (i))
+#define __local_sub(i,l) local_set((l), local_read(l) - (i))
+
+#endif /* _ASM_GENERIC_LOCAL_H */
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000..765be0b7d
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t and is thus
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+ local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i) { LOCAL_INIT(i) }
+
+#define local64_read(l) local_read(&(l)->a)
+#define local64_set(l,i) local_set((&(l)->a),(i))
+#define local64_inc(l) local_inc(&(l)->a)
+#define local64_dec(l) local_dec(&(l)->a)
+#define local64_add(i,l) local_add((i),(&(l)->a))
+#define local64_sub(i,l) local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l) local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <linux/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+ atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i) { ATOMIC64_INIT(i) }
+
+#define local64_read(l) atomic64_read(&(l)->a)
+#define local64_set(l,i) atomic64_set((&(l)->a),(i))
+#define local64_inc(l) atomic64_inc(&(l)->a)
+#define local64_dec(l) atomic64_dec(&(l)->a)
+#define local64_add(i,l) atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l) atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000..8a59b6e56
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* include this file into asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(volatile void __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 000000000..10cd4ffc6
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ * arch_mcs_spin_lock_contended(l)
+ * arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
new file mode 100644
index 000000000..a2c8ed602
--- /dev/null
+++ b/include/asm-generic/memory_model.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MEMORY_MODEL_H
+#define __ASM_MEMORY_MODEL_H
+
+#include <linux/pfn.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This header supports three memory models: FLATMEM, SPARSEMEM_VMEMMAP and
+ * SPARSEMEM.
+ */
+#if defined(CONFIG_FLATMEM)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (0UL)
+#endif
+
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+ ARCH_PFN_OFFSET)
+
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
+
+/* memmap is virtually contiguous. */
+#define __pfn_to_page(pfn) (vmemmap + (pfn))
+#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
+
+#elif defined(CONFIG_SPARSEMEM)
+/*
+ * Note: section's mem_map is encoded to reflect its start_pfn.
+ * section[i].section_mem_map == mem_map's address - start_pfn;
+ */
+#define __page_to_pfn(pg) \
+({ const struct page *__pg = (pg); \
+ int __sec = page_to_section(__pg); \
+ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
+})
+
+#define __pfn_to_page(pfn) \
+({ unsigned long __pfn = (pfn); \
+ struct mem_section *__sec = __pfn_to_section(__pfn); \
+ __section_mem_map_addr(__sec) + __pfn; \
+})
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) PHYS_PFN(paddr)
+#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
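+
+/*
+ * Worked example (illustration only): with a 4 KiB page size
+ * (PAGE_SHIFT == 12),
+ *	__phys_to_pfn(0x12345678) == 0x12345
+ *	__pfn_to_phys(0x12345)    == 0x12345000
+ */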
+
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
new file mode 100644
index 000000000..4dbb177d1
--- /dev/null
+++ b/include/asm-generic/mm_hooks.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
+ * and arch_unmap to be included in asm-FOO/mmu_context.h for any
+ * arch FOO which doesn't need to hook these.
+ */
+#ifndef _ASM_GENERIC_MM_HOOKS_H
+#define _ASM_GENERIC_MM_HOOKS_H
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644
index 000000000..5698fca3b
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ * 1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
+ * in asm/mmiowb.h, then #include this file
+ * 2. Ensure your I/O write accessors call mmiowb_set_pending()
+ * 3. Select ARCH_HAS_MMIOWB
+ * 4. Untangle the resulting mess of header files
+ * 5. Complain to your architects
+ */
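+
+/*
+ * Example only: the ordering problem being solved. With this tracking in
+ * place, a driver like the sketch below needs no explicit mmiowb() call;
+ * releasing the lock guarantees the device sees the MMIO write before any
+ * writes done by the next lock holder (names are assumptions):
+ *
+ *	spin_lock(&dev->lock);
+ *	writel(val, dev->regs + DATA_REG);
+ *	spin_unlock(&dev->lock);
+ */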
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm-generic/mmiowb_types.h>
+
+#ifndef arch_mmiowb_state
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state)
+#else
+#define __mmiowb_state() arch_mmiowb_state()
+#endif /* arch_mmiowb_state */
+
+static inline void mmiowb_set_pending(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+
+ if (likely(ms->nesting_count))
+ ms->mmiowb_pending = ms->nesting_count;
+}
+
+static inline void mmiowb_spin_lock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+ ms->nesting_count++;
+}
+
+static inline void mmiowb_spin_unlock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+
+ if (unlikely(ms->mmiowb_pending)) {
+ ms->mmiowb_pending = 0;
+ mmiowb();
+ }
+
+ ms->nesting_count--;
+}
+#else
+#define mmiowb_set_pending() do { } while (0)
+#define mmiowb_spin_lock() do { } while (0)
+#define mmiowb_spin_unlock() do { } while (0)
+#endif /* CONFIG_MMIOWB */
+#endif /* __ASM_GENERIC_MMIOWB_H */
diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h
new file mode 100644
index 000000000..8eb009565
--- /dev/null
+++ b/include/asm-generic/mmiowb_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
+#define __ASM_GENERIC_MMIOWB_TYPES_H
+
+#include <linux/types.h>
+
+struct mmiowb_state {
+ u16 nesting_count;
+ u16 mmiowb_pending;
+};
+
+#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
new file mode 100644
index 000000000..061838037
--- /dev/null
+++ b/include/asm-generic/mmu.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMU_H
+#define __ASM_GENERIC_MMU_H
+
+/*
+ * This is the mmu.h header for nommu implementations.
+ * Architectures with an MMU need something more complex.
+ */
+#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long end_brk;
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+#endif
+
+#endif /* __ASM_GENERIC_MMU_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
new file mode 100644
index 000000000..91727065b
--- /dev/null
+++ b/include/asm-generic/mmu_context.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMU_CONTEXT_H
+#define __ASM_GENERIC_MMU_CONTEXT_H
+
+/*
+ * Generic hooks to implement no-op functionality.
+ */
+
+struct task_struct;
+struct mm_struct;
+
+/*
+ * enter_lazy_tlb - Called when "tsk" is about to enter lazy TLB mode.
+ *
+ * @mm: the currently active mm context which is becoming lazy
+ * @tsk: task which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+#ifndef enter_lazy_tlb
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+#endif
+
+/**
+ * init_new_context - Initialize context of a new mm_struct.
+ * @tsk: task struct for the mm
+ * @mm: the new mm struct
+ * Return: 0 on success, -errno on failure
+ */
+#ifndef init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+#endif
+
+/**
+ * destroy_context - Undo init_new_context when the mm is going away
+ * @mm: old mm struct
+ */
+#ifndef destroy_context
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+#endif
+
+/**
+ * activate_mm - called after exec switches the current task to a new mm, to switch to it
+ * @prev_mm: previous mm of this task
+ * @next_mm: new mm
+ */
+#ifndef activate_mm
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+ switch_mm(prev_mm, next_mm, current);
+}
+#endif
+
+/**
+ * deactivate_mm - called when an mm is released after exit or exec switches away from it
+ * @tsk: the task
+ * @mm: the old mm
+ */
+#ifndef deactivate_mm
+static inline void deactivate_mm(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+}
+#endif
+
+#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
new file mode 100644
index 000000000..98e1541b7
--- /dev/null
+++ b/include/asm-generic/module.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MODULE_H
+#define __ASM_GENERIC_MODULE_H
+
+/*
+ * Many architectures just need a simple module
+ * loader without arch specific data.
+ */
+#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
+struct mod_arch_specific
+{
+};
+#endif
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Addr Elf64_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf64_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf64_Rela
+#endif
+#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
+#define ELF_R_SYM(X) ELF64_R_SYM(X)
+
+#else /* CONFIG_64BIT */
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Dyn Elf32_Dyn
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf32_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf32_Rela
+#endif
+#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
+#define ELF_R_SYM(X) ELF32_R_SYM(X)
+#endif
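+
+/*
+ * Example only: with the Elf_* aliases above, arch module code can be
+ * written once for both ELF32 and ELF64, e.g. a helper that looks up a
+ * section by name in a module image (sketch only, not part of this header;
+ * strcmp() needs <linux/string.h>):
+ *
+ *	static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ *					    const Elf_Shdr *sechdrs,
+ *					    const char *name)
+ *	{
+ *		const char *secstrs = (const char *)hdr +
+ *				      sechdrs[hdr->e_shstrndx].sh_offset;
+ *		unsigned int i;
+ *
+ *		for (i = 1; i < hdr->e_shnum; i++)
+ *			if (!strcmp(secstrs + sechdrs[i].sh_name, name))
+ *				return &sechdrs[i];
+ *		return NULL;
+ *	}
+ */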
+
+#endif /* __ASM_GENERIC_MODULE_H */
diff --git a/include/asm-generic/module.lds.h b/include/asm-generic/module.lds.h
new file mode 100644
index 000000000..f210d5c1b
--- /dev/null
+++ b/include/asm-generic/module.lds.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_MODULE_LDS_H
+#define __ASM_GENERIC_MODULE_LDS_H
+
+/*
+ * <asm/module.lds.h> can specify arch-specific sections for linking modules.
+ * Empty for the asm-generic header.
+ */
+
+#endif /* __ASM_GENERIC_MODULE_LDS_H */
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
new file mode 100644
index 000000000..bfb9eb9d7
--- /dev/null
+++ b/include/asm-generic/mshyperv.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux-specific definitions for managing interactions with Microsoft's
+ * Hyper-V hypervisor. The definitions in this file are architecture
+ * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
+ * that are specific to architecture <arch>.
+ *
+ * Definitions that are specified in the Hyper-V Top Level Functional
+ * Spec (TLFS) should not go in this file, but should instead go in
+ * hyperv-tlfs.h.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author : Michael Kelley <mikelley@microsoft.com>
+ */
+
+#ifndef _ASM_GENERIC_MSHYPERV_H
+#define _ASM_GENERIC_MSHYPERV_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/nmi.h>
+#include <asm/ptrace.h>
+#include <asm/hyperv-tlfs.h>
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 priv_high;
+ u32 misc_features;
+ u32 hints;
+ u32 nested_features;
+ u32 max_vp_index;
+ u32 max_lp_index;
+ u32 isolation_config_a;
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
+};
+extern struct ms_hyperv_info ms_hyperv;
+
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
+
+extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
+extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+extern bool hv_isolation_type_snp(void);
+
+/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
+static inline int hv_result(u64 status)
+{
+ return status & HV_HYPERCALL_RESULT_MASK;
+}
+
+static inline bool hv_result_success(u64 status)
+{
+ return hv_result(status) == HV_STATUS_SUCCESS;
+}
+
+static inline unsigned int hv_repcomp(u64 status)
+{
+ /* Bits [43:32] of status have 'Reps completed' data. */
+ return (status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET;
+}
+
+/*
+ * Rep hypercalls. Callers of this function are supposed to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ u64 control = code;
+ u64 status;
+ u16 rep_comp;
+
+ control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hv_do_hypercall(control, input, output);
+ if (!hv_result_success(status))
+ return status;
+
+ rep_comp = hv_repcomp(status);
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ touch_nmi_watchdog();
+ } while (rep_comp < rep_count);
+
+ return status;
+}
+
+/* Generate the guest OS identifier as described in the Hyper-V TLFS */
+static inline u64 hv_generate_guest_id(u64 kernel_version)
+{
+ u64 guest_id;
+
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
+ guest_id |= (kernel_version << 16);
+
+ return guest_id;
+}
+
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+ /*
+ * On crash we're reading some other CPU's message page and we need
+ * to be careful: this other CPU may have already cleared the header
+ * and the host may have already delivered some other message there.
+ * If we blindly write msg->header.message_type we're going
+ * to lose it. We can still lose a message of the same type but
+ * we count on the fact that there can only be one
+ * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+ * on crash.
+ */
+ if (cmpxchg(&msg->header.message_type, old_msg_type,
+ HVMSG_NONE) != old_msg_type)
+ return;
+
+ /*
+ * The cmpxchg() above does an implicit memory barrier to
+ * ensure the write to MessageType (i.e. set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot.
+ */
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ hv_set_register(HV_REGISTER_EOM, 0);
+ }
+}
+
+void hv_setup_vmbus_handler(void (*handler)(void));
+void hv_remove_vmbus_handler(void);
+void hv_setup_stimer0_handler(void (*handler)(void));
+void hv_remove_stimer0_handler(void);
+
+void hv_setup_kexec_handler(void (*handler)(void));
+void hv_remove_kexec_handler(void);
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
+void hv_remove_crash_handler(void);
+
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+
+extern bool hv_root_partition;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+/*
+ * The hypervisor's notion of virtual processor ID is different from
+ * Linux's notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information.
+ */
+extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
+
+extern u64 (*hv_read_reference_counter)(void);
+
+/* Sentinel value for an uninitialized entry in hv_vp_index array */
+#define VP_INVAL U32_MAX
+
+int __init hv_common_init(void);
+void __init hv_common_free(void);
+int hv_common_cpu_init(unsigned int cpu);
+int hv_common_cpu_die(unsigned int cpu);
+
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(unsigned long addr);
+
+/**
+ * hv_cpu_number_to_vp_number() - Map CPU to VP.
+ * @cpu_number: CPU number in Linux terms
+ *
+ * This function returns the mapping between the Linux processor
+ * number and the hypervisor's virtual processor number, useful
+ * in making hypercalls and other operations that refer to specific
+ * processors.
+ *
+ * Return: Virtual processor number in Hyper-V terms
+ */
+static inline int hv_cpu_number_to_vp_number(int cpu_number)
+{
+ return hv_vp_index[cpu_number];
+}
+
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool exclude_self)
+{
+ int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int this_cpu = smp_processor_id();
+
+ /* valid_bank_mask can represent up to 64 banks */
+ if (hv_max_vp_index / 64 >= 64)
+ return 0;
+
+ /*
+ * Clear all banks up to the maximum possible bank: hv_tlb_flush_ex
+ * structs are not cleared between calls and we would risk flushing
+ * unneeded vCPUs otherwise.
+ */
+ for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+ vpset->bank_contents[vcpu_bank] = 0;
+
+ /*
+ * Some banks may end up being empty but this is acceptable.
+ */
+ for_each_cpu(cpu, cpus) {
+ if (exclude_self && cpu == this_cpu)
+ continue;
+ vcpu = hv_cpu_number_to_vp_number(cpu);
+ if (vcpu == VP_INVAL)
+ return -1;
+ vcpu_bank = vcpu / 64;
+ vcpu_offset = vcpu % 64;
+ __set_bit(vcpu_offset, (unsigned long *)
+ &vpset->bank_contents[vcpu_bank]);
+ if (vcpu_bank >= nr_bank)
+ nr_bank = vcpu_bank + 1;
+ }
+ vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
+ return nr_bank;
+}
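+
+/*
+ * Worked example (illustration only): for CPUs whose VP numbers are 1 and
+ * 66, vcpu 1 lands in bank 0 / bit 1 and vcpu 66 in bank 1 / bit 2, so
+ * nr_bank becomes 2 and valid_bank_mask is GENMASK_ULL(1, 0) == 0x3.
+ */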
+
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ WARN_ON_ONCE(preemptible());
+ return __cpumask_to_vpset(vpset, cpus, true);
+}
+
+void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
+bool hv_is_hyperv_initialized(void);
+bool hv_is_hibernation_supported(void);
+enum hv_isolation_type hv_get_isolation_type(void);
+bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+void hyperv_cleanup(void);
+bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
+#else /* CONFIG_HYPERV */
+static inline bool hv_is_hyperv_initialized(void) { return false; }
+static inline bool hv_is_hibernation_supported(void) { return false; }
+static inline void hyperv_cleanup(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
+#endif /* CONFIG_HYPERV */
+
+#endif
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 000000000..bf910d47e
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MSI_H
+#define __ASM_GENERIC_MSI_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
+#endif
+
+struct msi_desc;
+
+/**
+ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
+ * @desc: Pointer to msi descriptor
+ * @hwirq: Associated hw interrupt number in the domain
+ * @flags: MSI allocation flags (e.g. MSI_ALLOC_FLAGS_PROXY_DEVICE)
+ * @scratchpad: Storage for implementation specific scratch data
+ *
+ * Architectures can provide their own implementation by not including
+ * asm-generic/msi.h into their arch specific header file.
+ */
+typedef struct msi_alloc_info {
+ struct msi_desc *desc;
+ irq_hw_number_t hwirq;
+ unsigned long flags;
+ union {
+ unsigned long ul;
+ void *ptr;
+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
+} msi_alloc_info_t;
+
+/* Device generating MSIs is proxying for another device */
+#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+
+#define GENERIC_MSI_DOMAIN_OPS 1
+
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
+#endif
diff --git a/include/asm-generic/nommu_context.h b/include/asm-generic/nommu_context.h
new file mode 100644
index 000000000..4f916f9e1
--- /dev/null
+++ b/include/asm-generic/nommu_context.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NOMMU_H
+#define __ASM_GENERIC_NOMMU_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+#include <asm-generic/mm_hooks.h>
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* __ASM_GENERIC_NOMMU_H */
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
new file mode 100644
index 000000000..1a3ad6d29
--- /dev/null
+++ b/include/asm-generic/numa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_NUMA_H
+#define __ASM_GENERIC_NUMA_H
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern bool numa_off;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ return node_to_cpumask_map[node];
+}
+#endif
+
+void __init arch_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
+
+#else /* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void arch_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+
+#endif /* CONFIG_NUMA */
+
+#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
new file mode 100644
index 000000000..6fc475618
--- /dev/null
+++ b/include/asm-generic/page.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PAGE_H
+#define __ASM_GENERIC_PAGE_H
+/*
+ * Generic page.h implementation, for NOMMU architectures.
+ * This provides the dummy definitions for the memory management.
+ */
+
+#ifdef CONFIG_MMU
+#error need to provide a real asm/page.h
+#endif
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pmd[16];
+} pmd_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((&x)->pmd[0])
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+#define PAGE_OFFSET (0)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
+
+#ifndef page_to_phys
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#endif
+
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+ ((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_GENERIC_PAGE_H */
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
new file mode 100644
index 000000000..8d3009dd2
--- /dev/null
+++ b/include/asm-generic/param.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARAM_H
+#define __ASM_GENERIC_PARAM_H
+
+#include <uapi/asm-generic/param.h>
+
+# undef HZ
+# define HZ CONFIG_HZ /* Internal kernel timer frequency */
+# define USER_HZ 100 /* some user interfaces are */
+# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
+#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
new file mode 100644
index 000000000..483991d61
--- /dev/null
+++ b/include/asm-generic/parport.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARPORT_H
+#define __ASM_GENERIC_PARPORT_H
+
+/*
+ * An ISA bus may have i8255 parallel ports at well-known
+ * locations in the I/O space, which are scanned by
+ * parport_pc_find_isa_ports.
+ *
+ * Without ISA support, the driver will only attach
+ * to devices on the PCI bus.
+ */
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+#ifdef CONFIG_ISA
+ return parport_pc_find_isa_ports(autoirq, autodma);
+#else
+ return 0;
+#endif
+}
+
+#endif /* __ASM_GENERIC_PARPORT_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
new file mode 100644
index 000000000..6869f1061
--- /dev/null
+++ b/include/asm-generic/pci.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
new file mode 100644
index 000000000..8fbb0a555
--- /dev/null
+++ b/include/asm-generic/pci_iomap.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_PCI_IOMAP_H
+#define __ASM_GENERIC_PCI_IOMAP_H
+
+struct pci_dev;
+#ifdef CONFIG_PCI
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+/* Create a virtual mapping cookie for a port on a given PCI device.
+ * Do not call this directly; it exists to make it easier for architectures
+ * to override. */
+#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
+extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+ unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
+#else
+#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
+#endif
+
+#elif defined(CONFIG_GENERIC_PCI_IOMAP)
+static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
+
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
+static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
+#endif
+
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
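A minimal sketch of how a PCI driver typically consumes pci_iomap()/pci_iounmap() in its probe() path; the BAR number, register offset and device are hypothetical, and a real driver would likely use managed (pcim_*) helpers or fuller error handling.

#include <linux/pci.h>
#include <linux/io.h>

#define EXAMPLE_BAR		0	/* assumed memory BAR */
#define EXAMPLE_REG_STATUS	0x00	/* assumed register offset */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Map the whole BAR; a maxlen of 0 means "no length limit". */
	regs = pci_iomap(pdev, EXAMPLE_BAR, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	dev_info(&pdev->dev, "status: %#x\n",
		 ioread32(regs + EXAMPLE_REG_STATUS));

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}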
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
new file mode 100644
index 000000000..6432a7fad
--- /dev/null
+++ b/include/asm-generic/percpu.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/percpu-defs.h>
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
+
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more efficient
+ * means of obtaining the offset to the per-cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * An arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
+ */
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#endif /* SMP */
+
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#define raw_cpu_generic_read(pcp) \
+({ \
+ *raw_cpu_ptr(&(pcp)); \
+})
+
+#define raw_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *raw_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#define raw_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ \
+ *__p += val; \
+ *__p; \
+})
+
+#define raw_cpu_generic_xchg(pcp, nval) \
+({ \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) __ret; \
+ __ret = *__p; \
+ *__p = nval; \
+ __ret; \
+})
+
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) __ret; \
+ __ret = *__p; \
+ if (__ret == (oval)) \
+ *__p = nval; \
+ __ret; \
+})
+
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
+ int __ret = 0; \
+ if (*__p1 == (oval1) && *__p2 == (oval2)) { \
+ *__p1 = nval1; \
+ *__p2 = nval2; \
+ __ret = 1; \
+ } \
+ (__ret); \
+})
+
+#define __this_cpu_generic_read_nopreempt(pcp) \
+({ \
+ typeof(pcp) ___ret; \
+ preempt_disable_notrace(); \
+ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ preempt_enable_notrace(); \
+ ___ret; \
+})
+
+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+ typeof(pcp) ___ret; \
+ unsigned long ___flags; \
+ raw_local_irq_save(___flags); \
+ ___ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(___flags); \
+ ___ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ if (__native_word(pcp)) \
+ __ret = __this_cpu_generic_read_nopreempt(pcp); \
+ else \
+ __ret = __this_cpu_generic_read_noirq(pcp); \
+ __ret; \
+})
+
+#define this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ raw_cpu_generic_to_op(pcp, val, op); \
+ raw_local_irq_restore(__flags); \
+} while (0)
+
+
+#define this_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_add_return(pcp, val); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_xchg(pcp, nval) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_xchg(pcp, nval); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ int __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+ oval1, oval2, nval1, nval2); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp)
+#endif
+
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp)
+#endif
+
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#endif /* _ASM_GENERIC_PERCPU_H_ */
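A short sketch of how these generic building blocks surface through the normal per-cpu API on an architecture without specialised per-cpu instructions; the counter and helper functions are hypothetical.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_hit(void)
{
	/* On the generic path this ends up as this_cpu_generic_to_op(..., +=):
	 * irq-save, add through raw_cpu_ptr(), irq-restore. */
	this_cpu_inc(example_hits);
}

static void example_dump_hits(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: %lu hits\n", cpu, per_cpu(example_hits, cpu));
}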
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
new file mode 100644
index 000000000..977bea16c
--- /dev/null
+++ b/include/asm-generic/pgalloc.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PGALLOC_H
+#define __ASM_GENERIC_PGALLOC_H
+
+#ifdef CONFIG_MMU
+
+#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO)
+#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
+
+/**
+ * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+/**
+ * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return __pte_alloc_one_kernel(mm);
+}
+#endif
+
+/**
+ * pte_free_kernel - free PTE-level kernel page table page
+ * @mm: the mm_struct of the current context
+ * @pte: pointer to the memory containing the page table
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+/**
+ * __pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ * @gfp: GFP flags to use for the allocation
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation or must have custom GFP flags.
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+{
+ struct page *pte;
+
+ pte = alloc_page(gfp);
+ if (!pte)
+ return NULL;
+ if (!pgtable_pte_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
+
+ return pte;
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
+/**
+ * pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+}
+#endif
+
+/*
+ * Should really implement gc for free page table pages. This could be
+ * done with a reference count in struct page.
+ */
+
+/**
+ * pte_free - free PTE-level user page table page
+ * @mm: the mm_struct of the current context
+ * @pte_page: the `struct page` representing the page table
+ */
+static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+ pgtable_pte_page_dtor(pte_page);
+ __free_page(pte_page);
+}
+
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
+/**
+ * pmd_alloc_one - allocate a page for PMD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocations use %GFP_PGTABLE_USER in user context and
+ * %GFP_PGTABLE_KERNEL in kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ page = alloc_pages(gfp, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_FREE
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ free_page((unsigned long)pmd);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (pud_t *)get_zeroed_page(gfp);
+}
+
+#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
+/**
+ * pud_alloc_one - allocate a page for PUD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page using %GFP_PGTABLE_USER for user context and
+ * %GFP_PGTABLE_KERNEL for kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return __pud_alloc_one(mm, addr);
+}
+#endif
+
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ free_page((unsigned long)pud);
+}
+
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#ifndef __HAVE_ARCH_PGD_FREE
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+#endif
+
+#endif /* CONFIG_MMU */
+
+#endif /* __ASM_GENERIC_PGALLOC_H */
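A sketch of the override pattern the __HAVE_ARCH_* guards enable: a hypothetical architecture's asm/pgalloc.h suppresses the generic pte_alloc_one() and builds its own on top of __pte_alloc_one() with different GFP flags. The highmem policy shown is only an example of an arch-specific choice, not a recommendation.

/* fragment of a hypothetical arch asm/pgalloc.h */
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* Same as the generic version, but additionally allow user PTE
	 * pages to come from highmem (hypothetical arch policy). */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
}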
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644
index 000000000..03b7dae47
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOP4D_H
+#define _PGTABLE_NOP4D_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_P4D_FOLDED 1
+
+typedef struct { pgd_t pgd; } p4d_t;
+
+#define P4D_SHIFT PGDIR_SHIFT
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the p4d is never bad, and a p4d always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
+
+#define pgd_populate(mm, pgd, p4d) do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
+/*
+ * (p4ds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ return (p4d_t *)pgd;
+}
+
+#define p4d_val(x) (pgd_val((x).pgd))
+#define __p4d(x) ((p4d_t) { __pgd(x) })
+
+#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
+
+/*
+ * allocating and freeing a p4d is trivial: the 1-entry p4d is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define p4d_alloc_one(mm, address) NULL
+#define p4d_free(mm, x) do { } while (0)
+#define p4d_free_tlb(tlb, x, a) do { } while (0)
+
+#undef p4d_addr_end
+#define p4d_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_H */
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 000000000..8ffd64e7a
--- /dev/null
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPMD_H
+#define _PGTABLE_NOPMD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nopud.h>
+
+struct mm_struct;
+
+#define __PAGETABLE_PMD_FOLDED 1
+
+/*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+ * without casting.
+ */
+typedef struct { pud_t pud; } pmd_t;
+
+#define PMD_SHIFT PUD_SHIFT
+#define PTRS_PER_PMD 1
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
+static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_user(pud_t pud) { return 0; }
+static inline int pud_leaf(pud_t pud) { return 0; }
+static inline void pud_clear(pud_t *pud) { }
+#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
+
+#define pud_populate(mm, pmd, pte) do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
+{
+ return (pmd_t *)pud;
+}
+#define pmd_offset pmd_offset
+
+#define pmd_val(x) (pud_val((x).pud))
+#define __pmd(x) ((pmd_t) { __pud(x) } )
+
+#define pud_page(pud) (pmd_page((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pud, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one(mm, address) NULL
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
+#define pmd_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PGTABLE_NOPMD_H */
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 000000000..eb70c6d7c
--- /dev/null
+++ b/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nop4d.h>
+
+#define __PAGETABLE_PUD_FOLDED 1
+
+/*
+ * Having the pud type consist of a p4d gets the size right, and allows
+ * us to conceptually access the p4d entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { p4d_t p4d; } pud_t;
+
+#define PUD_SHIFT P4D_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "p4d_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the p4d entry)
+ */
+static inline int p4d_none(p4d_t p4d) { return 0; }
+static inline int p4d_bad(p4d_t p4d) { return 0; }
+static inline int p4d_present(p4d_t p4d) { return 1; }
+static inline void p4d_clear(p4d_t *p4d) { }
+#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
+
+#define p4d_populate(mm, p4d, pud) do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
+/*
+ * (puds are folded into p4ds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return (pud_t *)p4d;
+}
+#define pud_offset pud_offset
+
+#define pud_val(x) (p4d_val((x).p4d))
+#define __pud(x) ((pud_t) { __p4d(x) })
+
+#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the p4d, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define pud_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOPUD_H */
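The point of these folding headers is that code written against the full five-level API keeps working unchanged: each folded *_offset() simply returns its argument recast, so a walk like the sketch below compiles on two-, three-, four- and five-level configurations alike. The helper name is hypothetical.

#include <linux/mm.h>
#include <linux/pgtable.h>

static pte_t *example_walk_kernel_address(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);		/* walk init_mm */
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);		/* folded: same entry as pgd */
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);		/* folded: same entry as p4d */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);		/* folded: same entry as pud */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}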
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
new file mode 100644
index 000000000..828966d4c
--- /dev/null
+++ b/include/asm-generic/pgtable_uffd.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
+#define _ASM_GENERIC_PGTABLE_UFFD_H
+
+#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
+static __always_inline int pte_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline int pmd_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static __always_inline pte_t pte_swp_mkuffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static __always_inline int pte_swp_uffd_wp(pte_t pte)
+{
+ return 0;
+}
+
+static __always_inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_uffd_wp(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
+
+#endif /* _ASM_GENERIC_PGTABLE_UFFD_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 000000000..b4d43a4af
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED (0)
+
+static __always_inline int preempt_count(void)
+{
+ return READ_ONCE(current_thread_info()->preempt_count);
+}
+
+static __always_inline volatile int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ /*
+	 * Because load-store architectures cannot do per-cpu atomic
+	 * operations, we cannot use PREEMPT_NEED_RESCHED because it might get
+ * lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPTION */
+
+#endif /* __ASM_PREEMPT_H */
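Roughly how linux/preempt.h composes these primitives into preempt_disable()/preempt_enable(); a simplified sketch (the real macros also add tracing hooks and debug accounting), using example_* names to avoid clashing with the real ones. Note __preempt_schedule() only exists under CONFIG_PREEMPTION.

#include <linux/compiler.h>	/* barrier(), unlikely() */

#define example_preempt_disable()				\
do {								\
	__preempt_count_add(1);					\
	barrier();						\
} while (0)

#define example_preempt_enable()				\
do {								\
	barrier();						\
	if (unlikely(__preempt_count_dec_and_test()))		\
		__preempt_schedule();				\
} while (0)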
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
new file mode 100644
index 000000000..75b8f4601
--- /dev/null
+++ b/include/asm-generic/qrwlock.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Queue read/write lock
+ *
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QRWLOCK_H
+#define __ASM_GENERIC_QRWLOCK_H
+
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+#include <asm-generic/qrwlock_types.h>
+
+/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
+
+/*
+ * Writer states & reader shift and bias.
+ */
+#define _QW_WAITING 0x100 /* A writer is waiting */
+#define _QW_LOCKED 0x0ff /* A writer holds the lock */
+#define _QW_WMASK 0x1ff /* Writer mask */
+#define _QR_SHIFT 9 /* Reader count shift */
+#define _QR_BIAS (1U << _QR_SHIFT)
+
+/*
+ * External function declarations
+ */
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
+
+/**
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queued_read_trylock(struct qrwlock *lock)
+{
+ int cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (likely(!(cnts & _QW_WMASK))) {
+ cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return 1;
+ atomic_sub(_QR_BIAS, &lock->cnts);
+ }
+ return 0;
+}
+
+/**
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queued_write_trylock(struct qrwlock *lock)
+{
+ int cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (unlikely(cnts))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+ _QW_LOCKED));
+}
+/**
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
+ */
+static inline void queued_read_lock(struct qrwlock *lock)
+{
+ int cnts;
+
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return;
+
+ /* The slowpath will decrement the reader count, if necessary. */
+ queued_read_lock_slowpath(lock);
+}
+
+/**
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
+ */
+static inline void queued_write_lock(struct qrwlock *lock)
+{
+ int cnts = 0;
+ /* Optimize for the unfair lock case where the fair flag is 0. */
+ if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
+ return;
+
+ queued_write_lock_slowpath(lock);
+}
+
+/**
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
+ */
+static inline void queued_read_unlock(struct qrwlock *lock)
+{
+ /*
+ * Atomically decrement the reader count
+ */
+ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
+}
+
+/**
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
+ */
+static inline void queued_write_unlock(struct qrwlock *lock)
+{
+ smp_store_release(&lock->wlocked, 0);
+}
+
+/**
+ * queued_rwlock_is_contended - check if the lock is contended
+ * @lock : Pointer to queued rwlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static inline int queued_rwlock_is_contended(struct qrwlock *lock)
+{
+ return arch_spin_is_locked(&lock->wait_lock);
+}
+
+/*
+ * Remapping rwlock architecture specific functions to the corresponding
+ * queued rwlock functions.
+ */
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
+#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l)
+
+#endif /* __ASM_GENERIC_QRWLOCK_H */
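For context, the consumer-facing API these arch_* remappings end up backing once an architecture selects ARCH_USE_QUEUED_RWLOCKS; the lock and accessors below are a hypothetical example.

#include <linux/spinlock.h>	/* pulls in the rwlock API */

static DEFINE_RWLOCK(example_lock);
static int example_value;

static int example_read(void)
{
	int v;

	read_lock(&example_lock);	/* -> queued_read_lock() */
	v = example_value;
	read_unlock(&example_lock);	/* -> queued_read_unlock() */
	return v;
}

static void example_write(int v)
{
	write_lock(&example_lock);	/* -> queued_write_lock() */
	example_value = v;
	write_unlock(&example_lock);	/* -> queued_write_unlock() */
}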
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
new file mode 100644
index 000000000..12392c14c
--- /dev/null
+++ b/include/asm-generic/qrwlock_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H
+#define __ASM_GENERIC_QRWLOCK_TYPES_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/spinlock_types.h>
+
+/*
+ * The queued read/write lock data structure
+ */
+
+typedef struct qrwlock {
+ union {
+ atomic_t cnts;
+ struct {
+#ifdef __LITTLE_ENDIAN
+ u8 wlocked; /* Locked for write? */
+ u8 __lstate[3];
+#else
+ u8 __lstate[3];
+ u8 wlocked; /* Locked for write? */
+#endif
+ };
+ };
+ arch_spinlock_t wait_lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { \
+ { .cnts = ATOMIC_INIT(0), }, \
+ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+}
+
+#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000..0655aa5b5
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Queued spinlock
+ *
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket-lock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together, please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also heavily relies on mixed size atomic operations; specifically
+ * it requires architectures to have xchg16, something which many LL/SC
+ * architectures need to implement as a 32bit and+or in order to satisfy the
+ * forward progress guarantees mentioned above.
+ *
+ * Further reading on mixed size atomics that might be relevant:
+ *
+ * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@hpe.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <linux/atomic.h>
+
+#ifndef queued_spin_is_locked
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ /*
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
+ */
+ return atomic_read(&lock->val);
+}
+#endif
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked wrt the lockref code, to prevent the lockref code from stealing
+ * the lock and changing things underneath it. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !lock.val.counter;
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ int val = atomic_read(&lock->val);
+
+ if (unlikely(val))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+#ifndef queued_spin_lock
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ int val = 0;
+
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return;
+
+ queued_spin_lock_slowpath(lock, val);
+}
+#endif
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * unlock() needs release semantics:
+ */
+ smp_store_release(&lock->locked, 0);
+}
+#endif
+
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
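A sketch of the glue an opting-in architecture typically provides (compare arch/arm64/include/asm/spinlock.h); the header name is hypothetical, and the arch additionally selects ARCH_USE_QUEUED_SPINLOCKS and ARCH_USE_QUEUED_RWLOCKS in Kconfig so kernel/locking builds the slowpaths.

/* hypothetical arch asm/spinlock.h */
#ifndef __ASM_EXAMPLE_SPINLOCK_H
#define __ASM_EXAMPLE_SPINLOCK_H

#include <asm/qspinlock.h>
#include <asm/qrwlock.h>

#endif /* __ASM_EXAMPLE_SPINLOCK_H */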
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000..2fd1fb89e
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Queued spinlock
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+#include <linux/types.h>
+
+typedef struct qspinlock {
+ union {
+ atomic_t val;
+
+ /*
+ * By using the whole 2nd least significant byte for the
+ * pending bit, we can allow better optimization of the lock
+ * acquisition for the pending bit holder.
+ */
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
+} arch_spinlock_t;
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
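A stand-alone sketch of the NR_CPUS < 16K layout above, mirroring the tail encoding used by encode_tail() in kernel/locking/qspinlock.c; the cpu and node-index values are arbitrary examples.

#include <stdio.h>

#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#define _Q_PENDING_BITS		8	/* NR_CPUS < 16K */
#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)

static unsigned int encode_tail(int cpu, int idx)
{
	/* "+ 1" so that a tail of 0 still means "no queued waiters". */
	return ((cpu + 1) << _Q_TAIL_CPU_OFFSET) | (idx << _Q_TAIL_IDX_OFFSET);
}

int main(void)
{
	/* cpu 3, MCS node index 1: (3+1) << 18 | 1 << 16 == 0x110000 */
	printf("tail = %#x\n", encode_tail(3, 1));
	return 0;
}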
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
new file mode 100644
index 000000000..8874f681b
--- /dev/null
+++ b/include/asm-generic/resource.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_RESOURCE_H
+#define _ASM_GENERIC_RESOURCE_H
+
+#include <uapi/asm-generic/resource.h>
+
+
+/*
+ * boot-time rlimit defaults for the init task:
+ */
+#define INIT_RLIMITS \
+{ \
+ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \
+ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
+ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_NPROC] = { 0, 0 }, \
+ [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
+ [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
+ [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_SIGPENDING] = { 0, 0 }, \
+ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
+ [RLIMIT_NICE] = { 0, 0 }, \
+ [RLIMIT_RTPRIO] = { 0, 0 }, \
+ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+#endif
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
new file mode 100644
index 000000000..8d0a6280e
--- /dev/null
+++ b/include/asm-generic/rwonce.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ (typeof(x))__read_once_word_nocheck(&(x)); \
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
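A sketch of use case (1) from the comment above: a flag shared between an interrupt handler and process context, where READ_ONCE()/WRITE_ONCE() keep the compiler from caching or tearing the accesses; the example_* names are hypothetical.

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/types.h>
#include <asm/processor.h>	/* cpu_relax() */

static bool example_data_ready;

/* Runs in interrupt context. */
static void example_irq_handler(void)
{
	WRITE_ONCE(example_data_ready, true);
}

/* Runs in process context; READ_ONCE() stops the compiler from hoisting
 * the load out of the loop and spinning on a stale register value. */
static void example_wait_for_data(void)
{
	while (!READ_ONCE(example_data_ready))
		cpu_relax();
}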
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 000000000..6b6f42bc5
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#ifndef __NR_seccomp_sigreturn_32
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#endif
+
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int mode1_syscalls_32[] = {
+ __NR_seccomp_read_32, __NR_seccomp_write_32,
+ __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+ -1, /* negative terminated */
+ };
+ return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
new file mode 100644
index 000000000..db13bb620
--- /dev/null
+++ b/include/asm-generic/sections.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_SECTIONS_H_
+#define _ASM_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/*
+ * Usage guidelines:
+ * _text, _data: architecture specific, don't use them in arch-independent code
+ * [_stext, _etext]: contains .text.* sections, may also contain .rodata.*
+ * and/or .init.* sections
+ * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
+ * and/or .init.* sections.
+ * [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__start_ro_after_init, __end_ro_after_init]:
+ * contains .data..ro_after_init section
+ * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
+ * may be out of this range on some architectures.
+ * [_sinittext, _einittext]: contains .init.text.* sections
+ * [__bss_start, __bss_stop]: contains BSS sections
+ *
+ * The following global variables are optional and may be unavailable on some
+ * architectures and/or kernel configurations.
+ * _text, _data
+ * __kprobes_text_start, __kprobes_text_end
+ * __entry_text_start, __entry_text_end
+ * __ctors_start, __ctors_end
+ * __irqentry_text_start, __irqentry_text_end
+ * __softirqentry_text_start, __softirqentry_text_end
+ * __start_opd, __end_opd
+ */
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
+extern char _end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __entry_text_start[], __entry_text_end[];
+extern char __start_rodata[], __end_rodata[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __softirqentry_text_start[], __softirqentry_text_end[];
+extern char __start_once[], __end_once[];
+
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
+/* Start and end of .opd section - used for function descriptors. */
+extern char __start_opd[], __end_opd[];
+
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
+extern __visible const void __nosave_begin, __nosave_end;
+
+/* Function descriptor handling (if any). Override in asm/sections.h */
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+void *dereference_function_descriptor(void *ptr);
+void *dereference_kernel_function_descriptor(void *ptr);
+#else
+#define dereference_function_descriptor(p) ((void *)(p))
+#define dereference_kernel_function_descriptor(p) ((void *)(p))
+
+/* An address is simply the address of the function. */
+typedef struct {
+ unsigned long addr;
+} func_desc_t;
+#endif
+
+static inline bool have_function_descriptors(void)
+{
+ return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
+}
+
+/**
+ * memory_contains - checks if an object is contained within a memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the memory region defined by @begin and @end, false
+ * otherwise.
+ */
+static inline bool memory_contains(void *begin, void *end, void *virt,
+ size_t size)
+{
+ return virt >= begin && virt + size <= end;
+}
+
+/**
+ * memory_intersects - checks if the region occupied by an object intersects
+ * with another memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the region specified by @begin and @end, false otherwise.
+ */
+static inline bool memory_intersects(void *begin, void *end, void *virt,
+ size_t size)
+{
+ void *vend = virt + size;
+
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
+}
+
+/**
+ * init_section_contains - checks if an object is contained within the init
+ * section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the init section, false otherwise.
+ */
+static inline bool init_section_contains(void *virt, size_t size)
+{
+ return memory_contains(__init_begin, __init_end, virt, size);
+}
+
+/**
+ * init_section_intersects - checks if the region occupied by an object
+ * intersects with the init section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the init section, false otherwise.
+ */
+static inline bool init_section_intersects(void *virt, size_t size)
+{
+ return memory_intersects(__init_begin, __init_end, virt, size);
+}
+
+/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data or .bss section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data or .bss, false otherwise.
+ * Note: on some archs it may return true for core RODATA and false on
+ *       others, but it will always return true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
+}
+
+/**
+ * is_kernel_rodata - checks if the pointer address is located in the
+ * .rodata section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .rodata, false otherwise.
+ */
+static inline bool is_kernel_rodata(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_rodata &&
+ addr < (unsigned long)__end_rodata;
+}
+
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper; it only checks the range from _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper; it checks the range from _stext to _end as well
+ * as the range from __init_begin to __init_end, which can lie outside of the
+ * _stext to _end range.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
+}
+
+#endif /* _ASM_GENERIC_SECTIONS_H_ */
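As a quick illustration of the helpers above (a hedged sketch, not part of the patch): a hypothetical check that refuses to keep a reference to an object whose memory lives in, or overlaps, the init section, since that memory is freed after boot. The function name my_obj_outlives_init is made up.

#include <asm/sections.h>

/* Hypothetical helper, for illustration only. */
static bool my_obj_outlives_init(void *obj, size_t size)
{
	/* Entirely inside [__init_begin, __init_end): freed after boot. */
	if (init_section_contains(obj, size))
		return false;

	/* Straddling the init section is no better. */
	if (init_section_intersects(obj, size))
		return false;

	return true;
}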
diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h
new file mode 100644
index 000000000..ca9f7b6be
--- /dev/null
+++ b/include/asm-generic/serial.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SERIAL_H
+#define __ASM_GENERIC_SERIAL_H
+
+/*
+ * This should not be an architecture-specific #define, oh well.
+ *
+ * Traditionally, it just describes i8250 and related serial ports
+ * that have this clock rate.
+ */
+
+#define BASE_BAUD (1843200 / 16)
+
+#endif /* __ASM_GENERIC_SERIAL_H */
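For context (an assumption-laden sketch, not from the patch): BASE_BAUD is the classic 1.8432 MHz i8250 clock divided by 16, i.e. 115200, and it feeds the usual divisor-latch calculation. The helper name below is hypothetical.

/* divisor = uartclk / (16 * baud) = BASE_BAUD / baud */
static inline unsigned int my_8250_divisor(unsigned int baud)
{
	return BASE_BAUD / baud;	/* e.g. 115200 / 9600 = 12 */
}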
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
new file mode 100644
index 000000000..c86abf6bc
--- /dev/null
+++ b/include/asm-generic/set_memory.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SET_MEMORY_H
+#define __ASM_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
+#endif
diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
new file mode 100644
index 000000000..b8f9035ff
--- /dev/null
+++ b/include/asm-generic/shmparam.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SHMPARAM_H
+#define __ASM_GENERIC_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* __ASM_GENERIC_SHMPARAM_H */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
new file mode 100644
index 000000000..663dd6d07
--- /dev/null
+++ b/include/asm-generic/signal.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SIGNAL_H
+#define __ASM_GENERIC_SIGNAL_H
+
+#include <uapi/asm-generic/signal.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/sigcontext.h>
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_SIGNAL_H */
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000..d0343d58a
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return !in_interrupt();
+}
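A hedged sketch of the usual caller pattern (not part of the header): only touch the SIMD register file when may_use_simd() says it is safe, and fall back to a scalar path otherwise. kernel_neon_begin()/kernel_neon_end() are the arm64 begin/end primitives used here as an example; the surrounding function is made up.

static void my_xor_block(u8 *dst, const u8 *src, size_t len)
{
	if (may_use_simd()) {
		kernel_neon_begin();
		/* ... vectorised implementation would go here ... */
		kernel_neon_end();
	} else {
		while (len--)
			*dst++ ^= *src++;
	}
}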
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
new file mode 100644
index 000000000..2a67aed9a
--- /dev/null
+++ b/include/asm-generic/softirq_stack.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+#define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
+#endif
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
new file mode 100644
index 000000000..fdfebcb05
--- /dev/null
+++ b/include/asm-generic/spinlock.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of polling for word
+ * modifications, be sure to implement that (see ARM64 for example).
+ *
+ */
+
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, lock);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior reads before subsequent writes ordering from
+ * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(lock);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(lock);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return ((val >> 16) != (val & 0xffff));
+}
+
+static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(lock);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return !arch_spin_is_locked(&lock);
+}
+
+#include <asm/qrwlock.h>
+
+#endif /* __ASM_GENERIC_SPINLOCK_H */
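To spell out the encoding the code above relies on (a sketch of an assumption, not new API): the 32-bit lock word holds the next ticket in the high half and the currently served ticket in the low half. The decoders below are hypothetical.

static inline u16 my_ticket_next(u32 val)  { return val >> 16; }
static inline u16 my_ticket_owner(u32 val) { return val & 0xffff; }

/* The lock is free exactly when both halves agree. */
static inline bool my_ticket_is_free(u32 val)
{
	return my_ticket_next(val) == my_ticket_owner(val);
}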
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000..8962bb730
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
new file mode 100644
index 000000000..f88dcd8ed
--- /dev/null
+++ b/include/asm-generic/statfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _GENERIC_STATFS_H
+#define _GENERIC_STATFS_H
+
+#include <uapi/asm-generic/statfs.h>
+
+typedef __kernel_fsid_t fsid_t;
+#endif
diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h
new file mode 100644
index 000000000..de5e02014
--- /dev/null
+++ b/include/asm-generic/string.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_GENERIC_STRING_H
+#define __ASM_GENERIC_STRING_H
+/*
+ * The kernel provides all required functions in lib/string.c
+ *
+ * Architectures probably want to provide at least their own optimized
+ * memcpy and memset functions though.
+ */
+
+#endif /* __ASM_GENERIC_STRING_H */
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
new file mode 100644
index 000000000..5897d100a
--- /dev/null
+++ b/include/asm-generic/switch_to.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Generic task switch macro wrapper.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but they serve more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_SWITCH_TO_H
+#define __ASM_GENERIC_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_GENERIC_SWITCH_TO_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
new file mode 100644
index 000000000..5a80fe728
--- /dev/null
+++ b/include/asm-generic/syscall.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * Note this returns int even on 64-bit machines. Only 32 bits of
+ * system call number can be meaningful. If the actual arch value
+ * is 64 bits, this truncates to 32 bits so 0xffffffff means -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry()
+ * returned nonzero to prevent the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op. The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val. If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @args: array filled with argument values
+ *
+ * Fetches 6 arguments to the system call. First argument is stored in
+ * @args[0], and so on.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args);
+
+/**
+ * syscall_get_arch - return the AUDIT_ARCH for the current system call
+ * @task: task of interest, must be blocked
+ *
+ * Returns the AUDIT_ARCH_* based on the system call convention in use.
+ *
+ * It's only valid to call this when @task is stopped on entry to a system
+ * call, due to %SYSCALL_WORK_SYSCALL_TRACE, %SYSCALL_WORK_SYSCALL_AUDIT, or
+ * %SYSCALL_WORK_SECCOMP.
+ *
+ * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
+ * provide an implementation of this.
+ */
+int syscall_get_arch(struct task_struct *task);
+#endif /* _ASM_SYSCALL_H */
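A minimal sketch of how a tracer or audit hook typically consumes this API once the target task is stopped (the function name is hypothetical; error handling elided):

static void my_trace_syscall_entry(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];
	int nr = syscall_get_nr(task, regs);

	if (nr == -1)
		return;		/* not inside a system call */

	syscall_get_arguments(task, regs, args);
	pr_debug("syscall %d(0x%lx, 0x%lx, ...)\n", nr, args[0], args[1]);
}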
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
new file mode 100644
index 000000000..933ca6581
--- /dev/null
+++ b/include/asm-generic/syscalls.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SYSCALLS_H
+#define __ASM_GENERIC_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+/*
+ * Calling conventions for these system calls can differ, so
+ * it's possible to override them.
+ */
+
+#ifndef sys_mmap2
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+#endif
+
+#ifndef sys_mmap
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t pgoff);
+#endif
+
+#ifndef sys_rt_sigreturn
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
+#endif
+
+#endif /* __ASM_GENERIC_SYSCALLS_H */
diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h
new file mode 100644
index 000000000..50ba9b5ce
--- /dev/null
+++ b/include/asm-generic/timex.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_TIMEX_H
+#define __ASM_GENERIC_TIMEX_H
+
+/*
+ * If you have a cycle counter, return the value here.
+ */
+typedef unsigned long cycles_t;
+#ifndef get_cycles
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Architectures are encouraged to implement read_current_timer
+ * and define this in order to avoid the expensive delay loop
+ * calibration during boot.
+ */
+#undef ARCH_HAS_READ_CURRENT_TIMER
+
+#endif /* __ASM_GENERIC_TIMEX_H */
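Illustrative only: with a real cycle counter wired up, get_cycles() can time a code block; with the stub above the delta is always zero, which is exactly why architectures are encouraged to provide their own. my_workload() is a placeholder.

extern void my_workload(void);	/* hypothetical function being measured */

static void my_time_workload(void)
{
	cycles_t start = get_cycles();

	my_workload();

	pr_info("workload took %lu cycles\n",
		(unsigned long)(get_cycles() - start));
}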
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
new file mode 100644
index 000000000..cab7cfebf
--- /dev/null
+++ b/include/asm-generic/tlb.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* include/asm-generic/tlb.h
+ *
+ * Generic TLB shootdown code
+ *
+ * Copyright 2001 Red Hat, Inc.
+ * Based on code from mm/memory.c Copyright Linus Torvalds and others.
+ *
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
+ */
+#ifndef _ASM_GENERIC__TLB_H
+#define _ASM_GENERIC__TLB_H
+
+#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
+#include <linux/hugetlb_inline.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or switching
+ * the loaded mm.
+ */
+#ifndef nmi_uaccess_okay
+# define nmi_uaccess_okay() true
+#endif
+
+#ifdef CONFIG_MMU
+
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ * 1) unhook page
+ * 2) TLB invalidate page
+ * 3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
+ *
+ * start and finish a mmu_gather
+ *
+ * Finish in particular will issue a (final) TLB invalidate and free
+ * all (remaining) queued pages.
+ *
+ * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ * Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ * there's large holes between the VMAs.
+ *
+ * - tlb_remove_table()
+ *
+ * tlb_remove_table() is the basic primitive to free page-table directories
+ * (__p*_free_tlb()). In its most primitive form it is an alias for
+ * tlb_remove_page() below, for when page directories are pages and have no
+ * additional constraints.
+ *
+ * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
+ * - tlb_remove_page() / __tlb_remove_page()
+ * - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ * __tlb_remove_page_size() is the basic primitive that queues a page for
+ * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ * boolean indicating if the queue is (now) full and a call to
+ * tlb_flush_mmu() is required.
+ *
+ * tlb_remove_page() and tlb_remove_page_size() imply the call to
+ * tlb_flush_mmu() when required and have no return value.
+ *
+ * - tlb_change_page_size()
+ *
+ * call before __tlb_remove_page*() to set the current page-size; implies a
+ * possible tlb_flush_mmu() call.
+ *
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ * related state, like the range)
+ *
+ * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ * whatever pages are still batched.
+ *
+ * - mmu_gather::fullmm
+ *
+ * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
+ * the entire mm; this allows a number of optimizations.
+ *
+ * - We can ignore tlb_{start,end}_vma(); because we don't
+ * care about ranges. Everything will be shot down.
+ *
+ * - (RISC) architectures that use ASIDs can cycle to a new ASID
+ * and delay the invalidation until ASID space runs out.
+ *
+ * - mmu_gather::need_flush_all
+ *
+ * A flag that can be set by the arch code if it wants to force
+ * flush the entire TLB irrespective of the range. For instance
+ * x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ * - mmu_gather::start / mmu_gather::end
+ *
+ * which provides the range that needs to be flushed to cover the pages to
+ * be freed.
+ *
+ * - mmu_gather::freed_tables
+ *
+ * set when we freed page table pages
+ *
+ * - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ * returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ * MMU_GATHER_PAGE_SIZE
+ *
+ * This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ * changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ * This might be useful if your architecture has size specific TLB
+ * invalidation instructions.
+ *
+ * MMU_GATHER_TABLE_FREE
+ *
+ * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ * for page directories (__p*_free_tlb()).
+ *
+ * Useful if your architecture has non-page page directories.
+ *
+ * When used, an architecture is expected to provide __tlb_remove_table()
+ * which does the actual freeing of these pages.
+ *
+ * MMU_GATHER_RCU_TABLE_FREE
+ *
+ * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ * comment below).
+ *
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ * and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ * MMU_GATHER_NO_FLUSH_CACHE
+ *
+ * Indicates the architecture has flush_cache_range() but it need *NOT* be called
+ * before unmapping a VMA.
+ *
+ * NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ * flush_cache_range() being a NOP, except Sparc64 seems to be
+ * different here.
+ *
+ * MMU_GATHER_MERGE_VMAS
+ *
+ * Indicates the architecture wants to merge ranges over VMAs; typical when
+ * multiple range invalidates are more expensive than a full invalidate.
+ *
+ * MMU_GATHER_NO_RANGE
+ *
+ * Use this if your architecture lacks an efficient flush_tlb_range(). This
+ * option implies MMU_GATHER_MERGE_VMAS above.
+ *
+ * MMU_GATHER_NO_GATHER
+ *
+ * If the option is set the mmu_gather will not track individual pages for
+ * delayed page free anymore. A platform that enables the option needs to
+ * provide its own implementation of the __tlb_remove_page_size() function to
+ * free pages.
+ *
+ * This is useful if your architecture already flushes TLB entries in the
+ * various ptep_get_and_clear() functions.
+ */
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
+struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+ struct rcu_head rcu;
+#endif
+ unsigned int nr;
+ void *tables[];
+};
+
+#define MAX_TABLE_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture that does not use the linux page-tables for
+ * hardware to skip the TLBI when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
+#endif
+
+void tlb_remove_table_sync_one(void);
+
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
+#endif
+
+static inline void tlb_remove_table_sync_one(void) { }
+
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define MMU_GATHER_BUNDLE 8
+
+struct mmu_gather_batch {
+ struct mmu_gather_batch *next;
+ unsigned int nr;
+ unsigned int max;
+ struct page *pages[];
+};
+
+#define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+
+/*
+ * Limit the maximum number of mmu_gather batches to reduce the risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
+
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+ int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+ struct mmu_table_batch *batch;
+#endif
+
+ unsigned long start;
+ unsigned long end;
+ /*
+ * we are in the middle of an operation to clear
+ * a full mm and can make some optimizations
+ */
+ unsigned int fullmm : 1;
+
+ /*
+ * we have performed an operation which
+ * requires a complete flush of the tlb
+ */
+ unsigned int need_flush_all : 1;
+
+ /*
+ * we have removed page directories
+ */
+ unsigned int freed_tables : 1;
+
+ /*
+ * at which levels have we cleared entries?
+ */
+ unsigned int cleared_ptes : 1;
+ unsigned int cleared_pmds : 1;
+ unsigned int cleared_puds : 1;
+ unsigned int cleared_p4ds : 1;
+
+ /*
+ * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+ */
+ unsigned int vma_exec : 1;
+ unsigned int vma_huge : 1;
+ unsigned int vma_pfn : 1;
+
+ unsigned int batch_count;
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
+
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
+ unsigned int page_size;
+#endif
+#endif
+};
+
+void tlb_flush_mmu(struct mmu_gather *tlb);
+
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+ unsigned long address,
+ unsigned int range_size)
+{
+ tlb->start = min(tlb->start, address);
+ tlb->end = max(tlb->end, address + range_size);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm) {
+ tlb->start = tlb->end = ~0;
+ } else {
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+ }
+ tlb->freed_tables = 0;
+ tlb->cleared_ptes = 0;
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
+ /*
+ * Do not reset mmu_gather::vma_* fields here, we do not
+ * call into tlb_start_vma() again to set them if there is an
+ * intermediate flush.
+ */
+}
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
+#endif
+
+/*
+ * When an architecture does not have an efficient means of range-flushing TLBs,
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->end)
+ flush_tlb_mm(tlb->mm);
+}
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || tlb->need_flush_all) {
+ flush_tlb_mm(tlb->mm);
+ } else if (tlb->end) {
+ struct vm_area_struct vma = {
+ .vm_mm = tlb->mm,
+ .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
+ (tlb->vma_huge ? VM_HUGETLB : 0),
+ };
+
+ flush_tlb_range(&vma, tlb->start, tlb->end);
+ }
+}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ /*
+ * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+ * mips-4k) flush only large pages.
+ *
+ * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+ * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+ * range.
+ *
+ * We rely on tlb_end_vma() to issue a flush, such that when we reset
+ * these values the batch is empty.
+ */
+ tlb->vma_huge = is_vm_hugetlb_page(vma);
+ tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+ tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+}
+
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ /*
+ * Anything calling __tlb_adjust_range() also sets at least one of
+ * these bits.
+ */
+ if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+ tlb->cleared_puds || tlb->cleared_p4ds))
+ return;
+
+ tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+ __tlb_reset_range(tlb);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ if (__tlb_remove_page_size(tlb, page, page_size))
+ tlb_flush_mmu(tlb);
+}
+
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ * required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
+ if (tlb->page_size && tlb->page_size != page_size) {
+ if (!tlb->fullmm && !tlb->need_flush_all)
+ tlb_flush_mmu(tlb);
+ }
+
+ tlb->page_size = page_size;
+#endif
+}
+
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+ if (tlb->cleared_ptes)
+ return PAGE_SHIFT;
+ if (tlb->cleared_pmds)
+ return PMD_SHIFT;
+ if (tlb->cleared_puds)
+ return PUD_SHIFT;
+ if (tlb->cleared_p4ds)
+ return P4D_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+ return 1UL << tlb_get_unmap_shift(tlb);
+}
+
+/*
+ * For TLB VMA handling, we can optimise these away when we're doing a full
+ * MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
+
+ tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+#endif
+}
+
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
+
+ /*
+ * VM_PFNMAP is more fragile because the core mm will not track the
+ * page mapcount -- there might not be page-frames for these PFNs after
+ * all. Force flush TLBs for such ranges to avoid munmap() vs
+ * unmap_mapping_range() races.
+ */
+ if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+ }
+}
+
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_p4ds = 1;
+}
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that PTEs were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate. This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ unsigned long _sz = huge_page_size(h); \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
+ tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
+ do { \
+ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
+ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+ } while (0)
+
+/**
+ * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
+ * invalidation. This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pud_tlb_entry
+#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
+ do { \
+ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
+ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
+ } while (0)
+
+/*
+ * For things like page table caches (i.e. caching addresses "inside" the
+ * page tables, like x86 does), for legacy reasons, flushing an
+ * individual page had better flush the page table caches behind it. This
+ * is definitely how x86 works, for example. And if you have an
+ * architected non-legacy page table cache (which I'm not aware of
+ * anybody actually doing), you're going to have some architecturally
+ * explicit flushing for that, likely *separate* from a regular TLB entry
+ * flush, and thus you'd need more than just some range expansion..
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
+ */
+
+#ifndef pte_free_tlb
+#define pte_free_tlb(tlb, ptep, address) \
+ do { \
+ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ __pte_free_tlb(tlb, ptep, address); \
+ } while (0)
+#endif
+
+#ifndef pmd_free_tlb
+#define pmd_free_tlb(tlb, pmdp, address) \
+ do { \
+ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ __pmd_free_tlb(tlb, pmdp, address); \
+ } while (0)
+#endif
+
+#ifndef pud_free_tlb
+#define pud_free_tlb(tlb, pudp, address) \
+ do { \
+ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ __pud_free_tlb(tlb, pudp, address); \
+ } while (0)
+#endif
+
+#ifndef p4d_free_tlb
+#define p4d_free_tlb(tlb, pudp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ __p4d_free_tlb(tlb, pudp, address); \
+ } while (0)
+#endif
+
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return true;
+}
+#endif
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_GENERIC__TLB_H */
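A hedged sketch of the unhook/flush/free ordering described in the big comment above, roughly what the core mm does per PTE (locking and details elided; the function name is made up):

static void my_zap_one_pte(struct mmu_gather *tlb,
			   struct vm_area_struct *vma,
			   pte_t *ptep, unsigned long addr,
			   struct page *page)
{
	/* 1) unhook: clear the translation */
	pte_clear(vma->vm_mm, addr, ptep);

	/* 2) record the address so the eventual TLB flush covers it */
	tlb_remove_tlb_entry(tlb, ptep, addr);

	/* 3) queue the page; it is freed only after the flush */
	tlb_remove_page(tlb, page);
}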
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
new file mode 100644
index 000000000..dc2669289
--- /dev/null
+++ b/include/asm-generic/tlbflush.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_TLBFLUSH_H
+#define __ASM_GENERIC_TLBFLUSH_H
+/*
+ * This is a dummy tlbflush implementation that can be used on all
+ * nommu architectures.
+ * If you have an MMU, you need to write your own functions.
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/tlbflush.h
+#endif
+
+#include <linux/bug.h>
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ BUG();
+}
+
+
+#endif /* __ASM_GENERIC_TLBFLUSH_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
new file mode 100644
index 000000000..4dbe715be
--- /dev/null
+++ b/include/asm-generic/topology.h
@@ -0,0 +1,77 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+#ifndef CONFIG_NUMA
+
+/* Other architectures wishing to use this simple topology API should fill
+ in the below functions as appropriate in their own <asm/topology.h> file. */
+#ifndef cpu_to_node
+#define cpu_to_node(cpu) ((void)(cpu),0)
+#endif
+#ifndef set_numa_node
+#define set_numa_node(node)
+#endif
+#ifndef set_cpu_numa_node
+#define set_cpu_numa_node(cpu, node)
+#endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu) ((void)(cpu),0)
+#endif
+
+#ifndef cpumask_of_node
+ #ifdef CONFIG_NUMA
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
+ #endif
+#endif
+#ifndef pcibus_to_node
+#define pcibus_to_node(bus) ((void)(bus), -1)
+#endif
+
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
+#endif /* CONFIG_NUMA */
+
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
+
+#ifndef set_numa_mem
+#define set_numa_mem(node)
+#endif
+#ifndef set_cpu_numa_mem
+#define set_cpu_numa_mem(cpu, node)
+#endif
+
+#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */
+
+#endif /* _ASM_GENERIC_TOPOLOGY_H */
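A small sketch (not from the patch) of why the no-op fallbacks are convenient: NUMA-aware callers can be written once, and on !CONFIG_NUMA everything quietly collapses to node 0. kmalloc_node() is the standard slab allocator entry point; the wrapper is hypothetical.

#include <linux/slab.h>

static void *my_alloc_near_cpu(size_t size, int cpu)
{
	return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}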
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 000000000..cbbca2959
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
new file mode 100644
index 000000000..a5be9e61a
--- /dev/null
+++ b/include/asm-generic/uaccess.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UACCESS_H
+#define __ASM_GENERIC_UACCESS_H
+
+/*
+ * User space memory access functions; these should work
+ * on any machine that has kernel and user data in the same
+ * address space, e.g. all NOMMU machines.
+ */
+#include <linux/string.h>
+#include <asm-generic/access_ok.h>
+
+#ifdef CONFIG_UACCESS_MEMCPY
+#include <asm/unaligned.h>
+
+static __always_inline int
+__get_user_fn(size_t size, const void __user *from, void *to)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 *)to = *((u8 __force *)from);
+ return 0;
+ case 2:
+ *(u16 *)to = get_unaligned((u16 __force *)from);
+ return 0;
+ case 4:
+ *(u32 *)to = get_unaligned((u32 __force *)from);
+ return 0;
+ case 8:
+ *(u64 *)to = get_unaligned((u64 __force *)from);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+
+}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+static __always_inline int
+__put_user_fn(size_t size, void __user *to, void *from)
+{
+ BUILD_BUG_ON(!__builtin_constant_p(size));
+
+ switch (size) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ put_unaligned(*(u16 *)from, (u16 __force *)to);
+ return 0;
+ case 4:
+ put_unaligned(*(u32 *)from, (u32 __force *)to);
+ return 0;
+ case 8:
+ put_unaligned(*(u64 *)from, (u64 __force *)to);
+ return 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ if (0) /* make sure the label looks used to the compiler */ \
+ goto err_label; \
+} while (0)
+
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ * This version just falls back to copy_{from,to}_user, which should
+ * provide a fast-path for small values.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*ptr)) ? \
+ __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
+ -EFAULT; \
+})
+
+#ifndef __put_user_fn
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+ return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+}
+
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
+extern int __put_user_bad(void) __attribute__((noreturn));
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long long __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ const void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
+})
+
+#ifndef __get_user_fn
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+}
+
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
+extern int __get_user_bad(void) __attribute__((noreturn));
+
+/*
+ * Zero Userspace
+ */
+#ifndef __clear_user
+static inline __must_check unsigned long
+__clear_user(void __user *to, unsigned long n)
+{
+ memset((void __force *)to, 0, n);
+ return 0;
+}
+#endif
+
+static inline __must_check unsigned long
+clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ if (!access_ok(to, n))
+ return n;
+
+ return __clear_user(to, n);
+}
+
+#include <asm/extable.h>
+
+__must_check long strncpy_from_user(char *dst, const char __user *src,
+ long count);
+__must_check long strnlen_user(const char __user *src, long n);
+
+#endif /* __ASM_GENERIC_UACCESS_H */
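A minimal usage sketch for the single-value helpers above, e.g. from a hypothetical ioctl handler; anything larger than a scalar would go through copy_{from,to}_user() instead.

static long my_get_version(u32 __user *uptr)
{
	u32 version = 42;	/* made-up value */

	return put_user(version, uptr) ? -EFAULT : 0;
}

static long my_set_flags(const u32 __user *uptr, u32 *flags)
{
	return get_user(*flags, uptr) ? -EFAULT : 0;
}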
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 000000000..699650f81
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UNALIGNED_H
+#define __ASM_GENERIC_UNALIGNED_H
+
+/*
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
+ */
+#include <linux/unaligned/packed_struct.h>
+#include <asm/byteorder.h>
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
+
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
+{
+ *p++ = val >> 40;
+ *p++ = val >> 32;
+ *p++ = val >> 24;
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be48(const u64 val, void *p)
+{
+ __put_unaligned_be48(val, p);
+}
+
+static inline u64 __get_unaligned_be48(const u8 *p)
+{
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
+ p[3] << 16 | p[4] << 8 | p[5];
+}
+
+static inline u64 get_unaligned_be48(const void *p)
+{
+ return __get_unaligned_be48(p);
+}
+
+#endif /* __ASM_GENERIC_UNALIGNED_H */
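An illustrative sketch: parsing and building a packed big-endian wire header from an arbitrary byte buffer without worrying about alignment (the struct layout is made up):

struct my_wire_hdr {
	u16 type;
	u32 length;
};

static void my_parse_hdr(const u8 *buf, struct my_wire_hdr *hdr)
{
	hdr->type   = get_unaligned_be16(buf);
	hdr->length = get_unaligned_be32(buf + 2);
}

static void my_build_hdr(u8 *buf, const struct my_wire_hdr *hdr)
{
	put_unaligned_be16(hdr->type, buf);
	put_unaligned_be32(hdr->length, buf + 2);
}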
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
new file mode 100644
index 000000000..35638c347
--- /dev/null
+++ b/include/asm-generic/user.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_USER_H
+#define __ASM_GENERIC_USER_H
+/*
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
+ */
+
+#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
new file mode 100644
index 000000000..c835607f7
--- /dev/null
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_VSYSCALL_H
+#define __ASM_GENERIC_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#ifndef __arch_get_k_vdso_data
+static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+{
+ return NULL;
+}
+#endif /* __arch_get_k_vdso_data */
+
+#ifndef __arch_update_vsyscall
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
+ struct timekeeper *tk)
+{
+}
+#endif /* __arch_update_vsyscall */
+
+#ifndef __arch_sync_vdso_data
+static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_data */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_VSYSCALL_H */
diff --git a/include/asm-generic/vermagic.h b/include/asm-generic/vermagic.h
new file mode 100644
index 000000000..084274a12
--- /dev/null
+++ b/include/asm-generic/vermagic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_GENERIC_VERMAGIC_H
+#define _ASM_GENERIC_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC ""
+
+#endif /* _ASM_GENERIC_VERMAGIC_H */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
new file mode 100644
index 000000000..adf91a783
--- /dev/null
+++ b/include/asm-generic/vga.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+#ifndef __ASM_GENERIC_VGA_H
+#define __ASM_GENERIC_VGA_H
+
+/*
+ * On most architectures that support VGA, we can just
+ * recalculate addresses and then access the videoram
+ * directly without any black magic.
+ *
+ * Everyone else needs to ioremap the address and use
+ * proper I/O accesses.
+ */
+#ifndef VGA_MAP_MEM
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
new file mode 100644
index 000000000..7ad6f51b3
--- /dev/null
+++ b/include/asm-generic/vmlinux.lds.h
@@ -0,0 +1,1192 @@
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample; architectures may have special requirements]
+ *
+ * OUTPUT_FORMAT(...)
+ * OUTPUT_ARCH(...)
+ * ENTRY(...)
+ * SECTIONS
+ * {
+ * . = START;
+ * __init_begin = .;
+ * HEAD_TEXT_SECTION
+ * INIT_TEXT_SECTION(PAGE_SIZE)
+ * INIT_DATA_SECTION(...)
+ * PERCPU_SECTION(CACHELINE_SIZE)
+ * __init_end = .;
+ *
+ * _stext = .;
+ * TEXT_SECTION = 0
+ * _etext = .;
+ *
+ * _sdata = .;
+ * RO_DATA(PAGE_SIZE)
+ * RW_DATA(...)
+ * _edata = .;
+ *
+ * EXCEPTION_TABLE(...)
+ *
+ * BSS_SECTION(0, 0, 0)
+ * _end = .;
+ *
+ * STABS_DEBUG
+ * DWARF_DEBUG
+ * ELF_DETAILS
+ *
+ * DISCARDS // must be the last
+ * }
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ * // __init_begin and __init_end should be page aligned, so that we can
+ * // free the whole .init memory
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ * [__nosave_begin, __nosave_end] for the nosave data
+ */
+
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+/*
+ * Only some architectures want to have the .notes segment visible in
+ * a separate PT_NOTE ELF Program Header. When this happens, it needs
+ * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
+ * Program Headers. In this case, though, the PT_LOAD needs to be made
+ * the default again so that all the following sections don't also end
+ * up in the PT_NOTE Program Header.
+ */
+#ifdef EMITS_PT_NOTE
+#define NOTES_HEADERS :text :note
+#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text
+#else
+#define NOTES_HEADERS
+#define NOTES_HEADERS_RESTORE
+#endif
+
+/*
+ * Some architectures have non-executable read-only exception tables.
+ * They can be added to the RO_DATA segment by specifying their desired
+ * alignment.
+ */
+#ifdef RO_EXCEPTION_TABLE_ALIGN
+#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
+#else
+#define RO_EXCEPTION_TABLE
+#endif
+
+/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(8)
+
+/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ *
+ * With LTO_CLANG, the linker also splits sections by default, so we need
+ * these macros to combine the sections during the final link.
+ *
+ * RODATA_MAIN is not used because existing code already defines .rodata.x
+ * sections to be brought in with rodata.
+ */
+#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
+#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define SDATA_MAIN .sdata
+#define RODATA_MAIN .rodata
+#define BSS_MAIN .bss
+#define SBSS_MAIN .sbss
+#endif
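+
+/*
+ * Illustrative example (assumed compiler behaviour): with
+ * -fdata-sections the compiler may emit
+ *
+ *	static int foo_counter;		(in some hypothetical foo.c)
+ *
+ * into a section named ".data.foo_counter", which the DATA_MAIN pattern
+ * above folds back into .data at link time. Kernel-defined sections
+ * using a double dot, such as ".data..percpu", are deliberately not
+ * matched by these patterns.
+ */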
+
+/*
+ * GCC 4.5 and later use a 32 byte section alignment for structures,
+ * except GCC 4.9, which aligns them on 64 bytes.
+ */
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+
+/*
+ * The order of the sched class addresses is important, as it is used
+ * to determine the relative priority of each sched class.
+ */
+#define SCHED_DATA \
+ STRUCT_ALIGN(); \
+ __sched_class_highest = .; \
+ *(__stop_sched_class) \
+ *(__dl_sched_class) \
+ *(__rt_sched_class) \
+ *(__fair_sched_class) \
+ *(__idle_sched_class) \
+ __sched_class_lowest = .;
+
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or can be discarded (which often
+ * happens at runtime).
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
+#else
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
+#endif
+
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points ftrace_stub_graph at ftrace_stub, but with the proper
+ * protocol. Defined by this linker script (asm-generic/vmlinux.lds.h).
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+/*
+ * The ftrace call sites are logged to a section whose name depends on
+ * the compiler option used. A given kernel image will only use one of
+ * them, known as FTRACE_CALLSITE_SECTION. We capture all of them here
+ * to avoid header dependencies on FTRACE_CALLSITE_SECTION's definition.
+ *
+ * ftrace_ops_list_func is aliased to arch_ftrace_ops_list_func because
+ * some architectures need a different prototype for that function,
+ * while ftrace_ops_list_func() itself keeps a single prototype.
+ */
+#define MCOUNT_REC() . = ALIGN(8); \
+ __start_mcount_loc = .; \
+ KEEP(*(__mcount_loc)) \
+ KEEP_PATCHABLE \
+ __stop_mcount_loc = .; \
+ FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
+#else
+# ifdef CONFIG_FUNCTION_TRACER
+# define MCOUNT_REC() FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
+# else
+# define MCOUNT_REC()
+# endif
+#endif
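+
+/*
+ * Background note (assumed compiler behaviour): the call sites gathered
+ * above typically come from __mcount_loc when the compiler records
+ * mcount calls (e.g. -pg with -mrecord-mcount) or from
+ * __patchable_function_entries when -fpatchable-function-entry is used;
+ * KEEP_PATCHABLE folds the latter in unless the architecture opts out
+ * via CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE.
+ */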
+
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
+ KEEP(*(_ftrace_annotated_branch)) \
+ __stop_annotated_branch_profile = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE() __start_branch_profile = .; \
+ KEEP(*(_ftrace_branch)) \
+ __stop_branch_profile = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
+#ifdef CONFIG_KPROBES
+#define KPROBE_BLACKLIST() . = ALIGN(8); \
+ __start_kprobe_blacklist = .; \
+ KEEP(*(_kprobe_blacklist)) \
+ __stop_kprobe_blacklist = .;
+#else
+#define KPROBE_BLACKLIST()
+#endif
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
+ __start_error_injection_whitelist = .; \
+ KEEP(*(_error_injection_whitelist)) \
+ __stop_error_injection_whitelist = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
+#ifdef CONFIG_EVENT_TRACING
+#define FTRACE_EVENTS() . = ALIGN(8); \
+ __start_ftrace_events = .; \
+ KEEP(*(_ftrace_events)) \
+ __stop_ftrace_events = .; \
+ __start_ftrace_eval_maps = .; \
+ KEEP(*(_ftrace_eval_map)) \
+ __stop_ftrace_eval_maps = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
+			 KEEP(*(__trace_printk_fmt)) /* trace_printk() format pointers */ \
+ __stop___trace_bprintk_fmt = .;
+#define TRACEPOINT_STR() __start___tracepoint_str = .; \
+			 KEEP(*(__tracepoint_str)) /* Tracepoint string pointers */ \
+ __stop___tracepoint_str = .;
+#else
+#define TRACE_PRINTKS()
+#define TRACEPOINT_STR()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() . = ALIGN(8); \
+ __start_syscalls_metadata = .; \
+ KEEP(*(__syscalls_metadata)) \
+ __stop_syscalls_metadata = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
+#ifdef CONFIG_BPF_EVENTS
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ __start__bpf_raw_tp = .; \
+ KEEP(*(__bpf_raw_tp_map)) \
+ __stop__bpf_raw_tp = .;
+#else
+#define BPF_RAW_TP()
+#endif
+
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() . = ALIGN(8); \
+ __earlycon_table = .; \
+ KEEP(*(__earlycon_table)) \
+ __earlycon_table_end = .;
+#else
+#define EARLYCON_TABLE()
+#endif
+
+#ifdef CONFIG_SECURITY
+#define LSM_TABLE() . = ALIGN(8); \
+ __start_lsm_info = .; \
+ KEEP(*(.lsm_info.init)) \
+ __end_lsm_info = .;
+#define EARLY_LSM_TABLE() . = ALIGN(8); \
+ __start_early_lsm_info = .; \
+ KEEP(*(.early_lsm_info.init)) \
+ __end_early_lsm_info = .;
+#else
+#define LSM_TABLE()
+#define EARLY_LSM_TABLE()
+#endif
+
+#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
+#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
+#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
+#define _OF_TABLE_0(name)
+#define _OF_TABLE_1(name) \
+ . = ALIGN(8); \
+ __##name##_of_table = .; \
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
+
+#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
+#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
+#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
+#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
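+
+/*
+ * For reference, a purely mechanical expansion of the helpers above:
+ * with CONFIG_TIMER_OF=y, TIMER_OF_TABLES() becomes _OF_TABLE_1(timer),
+ * i.e.
+ *
+ *	. = ALIGN(8);
+ *	__timer_of_table = .;
+ *	KEEP(*(__timer_of_table))
+ *	KEEP(*(__timer_of_table_end))
+ *
+ * while a disabled option expands via _OF_TABLE_0() to nothing.
+ */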
+
+#ifdef CONFIG_ACPI
+#define ACPI_PROBE_TABLE(name) \
+ . = ALIGN(8); \
+ __##name##_acpi_probe_table = .; \
+ KEEP(*(__##name##_acpi_probe_table)) \
+ __##name##_acpi_probe_table_end = .;
+#else
+#define ACPI_PROBE_TABLE(name)
+#endif
+
+#ifdef CONFIG_THERMAL
+#define THERMAL_TABLE(name) \
+ . = ALIGN(8); \
+ __##name##_thermal_table = .; \
+ KEEP(*(__##name##_thermal_table)) \
+ __##name##_thermal_table_end = .;
+#else
+#define THERMAL_TABLE(name)
+#endif
+
+#define KERNEL_DTB() \
+ STRUCT_ALIGN(); \
+ __dtb_start = .; \
+ KEEP(*(.dtb.init.rodata)) \
+ __dtb_end = .;
+
+/*
+ * .data section
+ */
+#define DATA_DATA \
+ *(.xiptext) \
+ *(DATA_MAIN) \
+ *(.data..decrypted) \
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data*) \
+ MEM_KEEP(exit.data*) \
+ *(.data.unlikely) \
+ __start_once = .; \
+ *(.data.once) \
+ __end_once = .; \
+ STRUCT_ALIGN(); \
+ *(__tracepoints) \
+ /* implement dynamic printk debug */ \
+ . = ALIGN(8); \
+ __start___dyndbg_classes = .; \
+ KEEP(*(__dyndbg_classes)) \
+ __stop___dyndbg_classes = .; \
+ __start___dyndbg = .; \
+ KEEP(*(__dyndbg)) \
+ __stop___dyndbg = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE() \
+ TRACE_PRINTKS() \
+ BPF_RAW_TP() \
+ TRACEPOINT_STR()
+
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA \
+ . = ALIGN(PAGE_SIZE); \
+ __nosave_begin = .; \
+ *(.data..nosave) \
+ . = ALIGN(PAGE_SIZE); \
+ __nosave_end = .;
+
+#define PAGE_ALIGNED_DATA(page_align) \
+ . = ALIGN(page_align); \
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
+
+#define READ_MOSTLY_DATA(align) \
+ . = ALIGN(align); \
+ *(.data..read_mostly) \
+ . = ALIGN(align);
+
+#define CACHELINE_ALIGNED_DATA(align) \
+ . = ALIGN(align); \
+ *(.data..cacheline_aligned)
+
+#define INIT_TASK_DATA(align) \
+ . = ALIGN(align); \
+ __start_init_task = .; \
+ init_thread_union = .; \
+ init_stack = .; \
+ KEEP(*(.data..init_task)) \
+ KEEP(*(.data..init_thread_info)) \
+ . = __start_init_task + THREAD_SIZE; \
+ __end_init_task = .;
+
+#define JUMP_TABLE_DATA \
+ . = ALIGN(8); \
+ __start___jump_table = .; \
+ KEEP(*(__jump_table)) \
+ __stop___jump_table = .;
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+#define STATIC_CALL_DATA \
+ . = ALIGN(8); \
+ __start_static_call_sites = .; \
+ KEEP(*(.static_call_sites)) \
+ __stop_static_call_sites = .; \
+ __start_static_call_tramp_key = .; \
+ KEEP(*(.static_call_tramp_key)) \
+ __stop_static_call_tramp_key = .;
+#else
+#define STATIC_CALL_DATA
+#endif
+
+/*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
+ __start_ro_after_init = .; \
+ *(.data..ro_after_init) \
+ JUMP_TABLE_DATA \
+ STATIC_CALL_DATA \
+ __end_ro_after_init = .;
+#endif
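+
+/*
+ * Illustrative note (annotation assumed to live elsewhere in the tree):
+ * data marked __ro_after_init, e.g.
+ *
+ *	static int example_setting __ro_after_init;	(hypothetical)
+ *
+ * is expected to be emitted into .data..ro_after_init, which the rule
+ * above collects between __start_ro_after_init and __end_ro_after_init
+ * so the region can be made read-only once boot finishes.
+ */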
+
+/*
+ * .kcfi_traps contains a list of KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ __start___kcfi_traps = .; \
+ KEEP(*(.kcfi_traps)) \
+ __stop___kcfi_traps = .; \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
+ * Read only Data
+ */
+#define RO_DATA(align) \
+ . = ALIGN((align)); \
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ __start_rodata = .; \
+ *(.rodata) *(.rodata.*) \
+ SCHED_DATA \
+ RO_AFTER_INIT_DATA /* Read only after init */ \
+ . = ALIGN(8); \
+ __start___tracepoints_ptrs = .; \
+ KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
+ __stop___tracepoints_ptrs = .; \
+ *(__tracepoints_strings)/* Tracepoints: strings */ \
+ } \
+ \
+ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
+ *(.rodata1) \
+ } \
+ \
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ __start_pci_fixups_early = .; \
+ KEEP(*(.pci_fixup_early)) \
+ __end_pci_fixups_early = .; \
+ __start_pci_fixups_header = .; \
+ KEEP(*(.pci_fixup_header)) \
+ __end_pci_fixups_header = .; \
+ __start_pci_fixups_final = .; \
+ KEEP(*(.pci_fixup_final)) \
+ __end_pci_fixups_final = .; \
+ __start_pci_fixups_enable = .; \
+ KEEP(*(.pci_fixup_enable)) \
+ __end_pci_fixups_enable = .; \
+ __start_pci_fixups_resume = .; \
+ KEEP(*(.pci_fixup_resume)) \
+ __end_pci_fixups_resume = .; \
+ __start_pci_fixups_resume_early = .; \
+ KEEP(*(.pci_fixup_resume_early)) \
+ __end_pci_fixups_resume_early = .; \
+ __start_pci_fixups_suspend = .; \
+ KEEP(*(.pci_fixup_suspend)) \
+ __end_pci_fixups_suspend = .; \
+ __start_pci_fixups_suspend_late = .; \
+ KEEP(*(.pci_fixup_suspend_late)) \
+ __end_pci_fixups_suspend_late = .; \
+ } \
+ \
+ FW_LOADER_BUILT_IN_DATA \
+ TRACEDATA \
+ \
+ PRINTK_INDEX \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ __start___ksymtab = .; \
+ KEEP(*(SORT(___ksymtab+*))) \
+ __stop___ksymtab = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ __start___ksymtab_gpl = .; \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
+ __stop___ksymtab_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ __start___kcrctab = .; \
+ KEEP(*(SORT(___kcrctab+*))) \
+ __stop___kcrctab = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ __start___kcrctab_gpl = .; \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
+ __stop___kcrctab_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+ *(__ksymtab_strings) \
+ } \
+ \
+ /* __*init sections */ \
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ MEM_KEEP(init.rodata) \
+ MEM_KEEP(exit.rodata) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ __start___param = .; \
+ KEEP(*(__param)) \
+ __stop___param = .; \
+ } \
+ \
+ /* Built-in module versions. */ \
+ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+ __start___modver = .; \
+ KEEP(*(__modver)) \
+ __stop___modver = .; \
+ } \
+ \
+ KCFI_TRAPS \
+ \
+ RO_EXCEPTION_TABLE \
+ NOTES \
+ BTF \
+ \
+ . = ALIGN((align)); \
+ __end_rodata = .;
+
+
+/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
+/*
+ * .text section. Map to function alignment to avoid address changes
+ * during the second ld pass when generating System.map.
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
+#define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
+ *(.text..refcount) \
+ *(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
+ MEM_KEEP(init.text*) \
+ MEM_KEEP(exit.text*) \
+
+
+/* sched.text is aligned to function alignment to ensure we have the
+ * same address even at the second ld pass when generating System.map. */
+#define SCHED_TEXT \
+ ALIGN_FUNCTION(); \
+ __sched_text_start = .; \
+ *(.sched.text) \
+ __sched_text_end = .;
+
+/* spinlock.text is aligned to function alignment to ensure we have the
+ * same address even at the second ld pass when generating System.map. */
+#define LOCK_TEXT \
+ ALIGN_FUNCTION(); \
+ __lock_text_start = .; \
+ *(.spinlock.text) \
+ __lock_text_end = .;
+
+#define CPUIDLE_TEXT \
+ ALIGN_FUNCTION(); \
+ __cpuidle_text_start = .; \
+ *(.cpuidle.text) \
+ __cpuidle_text_end = .;
+
+#define KPROBES_TEXT \
+ ALIGN_FUNCTION(); \
+ __kprobes_text_start = .; \
+ *(.kprobes.text) \
+ __kprobes_text_end = .;
+
+#define ENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .;
+
+#define IRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __irqentry_text_start = .; \
+ *(.irqentry.text) \
+ __irqentry_text_end = .;
+
+#define SOFTIRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __softirqentry_text_start = .; \
+ *(.softirqentry.text) \
+ __softirqentry_text_end = .;
+
+#define STATIC_CALL_TEXT \
+ ALIGN_FUNCTION(); \
+ __static_call_text_start = .; \
+ *(.static_call.text) \
+ __static_call_text_end = .;
+
+/* Section used for early init (in .S files) */
+#define HEAD_TEXT KEEP(*(.head.text))
+
+#define HEAD_TEXT_SECTION \
+ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
+ HEAD_TEXT \
+ }
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align) \
+ . = ALIGN(align); \
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
+ __start___ex_table = .; \
+ KEEP(*(__ex_table)) \
+ __stop___ex_table = .; \
+ }
+
+/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF \
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
+ __start_BTF = .; \
+ KEEP(*(.BTF)) \
+ __stop_BTF = .; \
+ } \
+ . = ALIGN(4); \
+ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
+ *(.BTF_ids) \
+ }
+#else
+#define BTF
+#endif
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA_SECTION(align) \
+ . = ALIGN(align); \
+ .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
+ INIT_TASK_DATA(align) \
+ }
+
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS() . = ALIGN(8); \
+ __ctors_start = .; \
+ KEEP(*(SORT(.ctors.*))) \
+ KEEP(*(.ctors)) \
+ KEEP(*(SORT(.init_array.*))) \
+ KEEP(*(.init_array)) \
+ __ctors_end = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
+/* init and exit section handling */
+#define INIT_DATA \
+ KEEP(*(SORT(___kentry+*))) \
+ *(.init.data init.data.*) \
+ MEM_DISCARD(init.data*) \
+ KERNEL_CTORS() \
+ MCOUNT_REC() \
+ *(.init.rodata .init.rodata.*) \
+ FTRACE_EVENTS() \
+ TRACE_SYSCALLS() \
+ KPROBE_BLACKLIST() \
+ ERROR_INJECT_WHITELIST() \
+ MEM_DISCARD(init.rodata) \
+ CLK_OF_TABLES() \
+ RESERVEDMEM_OF_TABLES() \
+ TIMER_OF_TABLES() \
+ CPU_METHOD_OF_TABLES() \
+ CPUIDLE_METHOD_OF_TABLES() \
+ KERNEL_DTB() \
+ IRQCHIP_OF_MATCH_TABLE() \
+ ACPI_PROBE_TABLE(irqchip) \
+ ACPI_PROBE_TABLE(timer) \
+ THERMAL_TABLE(governor) \
+ EARLYCON_TABLE() \
+ LSM_TABLE() \
+ EARLY_LSM_TABLE() \
+ KUNIT_TABLE()
+
+#define INIT_TEXT \
+ *(.init.text .init.text.*) \
+ *(.text.startup) \
+ MEM_DISCARD(init.text*)
+
+#define EXIT_DATA \
+ *(.exit.data .exit.data.*) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
+ MEM_DISCARD(exit.data*) \
+ MEM_DISCARD(exit.rodata*)
+
+#define EXIT_TEXT \
+ *(.exit.text) \
+ *(.text.exit) \
+ MEM_DISCARD(exit.text)
+
+#define EXIT_CALL \
+ *(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS(sbss_align) \
+ . = ALIGN(sbss_align); \
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+ *(.dynsbss) \
+ *(SBSS_MAIN) \
+ *(.scommon) \
+ }
+
+/*
+ * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
+ * sections to the front of .bss.
+ */
+#ifndef BSS_FIRST_SECTIONS
+#define BSS_FIRST_SECTIONS
+#endif
+
+#define BSS(bss_align) \
+ . = ALIGN(bss_align); \
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+ BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
+ *(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.dynbss) \
+ *(BSS_MAIN) \
+ *(COMMON) \
+ }
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
+#define DWARF_DEBUG \
+ /* DWARF 1 */ \
+ .debug 0 : { *(.debug) } \
+ .line 0 : { *(.line) } \
+ /* GNU DWARF 1 extensions */ \
+ .debug_srcinfo 0 : { *(.debug_srcinfo) } \
+ .debug_sfnames 0 : { *(.debug_sfnames) } \
+ /* DWARF 1.1 and DWARF 2 */ \
+ .debug_aranges 0 : { *(.debug_aranges) } \
+ .debug_pubnames 0 : { *(.debug_pubnames) } \
+ /* DWARF 2 */ \
+ .debug_info 0 : { *(.debug_info \
+ .gnu.linkonce.wi.*) } \
+ .debug_abbrev 0 : { *(.debug_abbrev) } \
+ .debug_line 0 : { *(.debug_line) } \
+ .debug_frame 0 : { *(.debug_frame) } \
+ .debug_str 0 : { *(.debug_str) } \
+ .debug_loc 0 : { *(.debug_loc) } \
+ .debug_macinfo 0 : { *(.debug_macinfo) } \
+ .debug_pubtypes 0 : { *(.debug_pubtypes) } \
+ /* DWARF 3 */ \
+ .debug_ranges 0 : { *(.debug_ranges) } \
+ /* SGI/MIPS DWARF 2 extensions */ \
+ .debug_weaknames 0 : { *(.debug_weaknames) } \
+ .debug_funcnames 0 : { *(.debug_funcnames) } \
+ .debug_typenames 0 : { *(.debug_typenames) } \
+ .debug_varnames 0 : { *(.debug_varnames) } \
+ /* GNU DWARF 2 extensions */ \
+ .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
+ .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
+ /* DWARF 4 */ \
+ .debug_types 0 : { *(.debug_types) } \
+ /* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
+ .debug_macro 0 : { *(.debug_macro) } \
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
+
+/* Stabs debugging sections. */
+#define STABS_DEBUG \
+ .stab 0 : { *(.stab) } \
+ .stabstr 0 : { *(.stabstr) } \
+ .stab.excl 0 : { *(.stab.excl) } \
+ .stab.exclstr 0 : { *(.stab.exclstr) } \
+ .stab.index 0 : { *(.stab.index) } \
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+
+/* Required sections not related to debugging. */
+#define ELF_DETAILS \
+ .comment 0 : { *(.comment) } \
+ .symtab 0 : { *(.symtab) } \
+ .strtab 0 : { *(.strtab) } \
+ .shstrtab 0 : { *(.shstrtab) }
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUG_TABLE \
+ . = ALIGN(8); \
+ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
+ __start___bug_table = .; \
+ KEEP(*(__bug_table)) \
+ __stop___bug_table = .; \
+ }
+#else
+#define BUG_TABLE
+#endif
+
+#ifdef CONFIG_UNWINDER_ORC
+#define ORC_UNWIND_TABLE \
+ . = ALIGN(4); \
+ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
+ __start_orc_unwind_ip = .; \
+ KEEP(*(.orc_unwind_ip)) \
+ __stop_orc_unwind_ip = .; \
+ } \
+ . = ALIGN(2); \
+ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
+ __start_orc_unwind = .; \
+ KEEP(*(.orc_unwind)) \
+ __stop_orc_unwind = .; \
+ } \
+ text_size = _etext - _stext; \
+ . = ALIGN(4); \
+ .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
+ orc_lookup = .; \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
+ LOOKUP_BLOCK_SIZE) + 1) * 4; \
+ orc_lookup_end = .; \
+ }
+#else
+#define ORC_UNWIND_TABLE
+#endif
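+
+/*
+ * Sizing note for .orc_lookup above: the expression reserves one 4-byte
+ * lookup slot per LOOKUP_BLOCK_SIZE bytes of text, rounded up, plus one
+ * terminating slot, i.e.
+ *
+ *	slots = DIV_ROUND_UP(text_size, LOOKUP_BLOCK_SIZE) + 1;
+ *	reserved bytes = slots * 4;
+ */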
+
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ __start_builtin_fw = .; \
+ KEEP(*(.builtin_fw)) \
+ __end_builtin_fw = .; \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA \
+ . = ALIGN(4); \
+ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
+ __tracedata_start = .; \
+ KEEP(*(.tracedata)) \
+ __tracedata_end = .; \
+ }
+#else
+#define TRACEDATA
+#endif
+
+#ifdef CONFIG_PRINTK_INDEX
+#define PRINTK_INDEX \
+ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
+ __start_printk_index = .; \
+ *(.printk_index) \
+ __stop_printk_index = .; \
+ }
+#else
+#define PRINTK_INDEX
+#endif
+
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the .notes output section would end up with type PROGBITS instead of NOTE.
+ */
+#define NOTES \
+ /DISCARD/ : { *(.note.GNU-stack) } \
+ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
+ __start_notes = .; \
+ KEEP(*(.note.*)) \
+ __stop_notes = .; \
+ } NOTES_HEADERS \
+ NOTES_HEADERS_RESTORE
+
+#define INIT_SETUP(initsetup_align) \
+ . = ALIGN(initsetup_align); \
+ __setup_start = .; \
+ KEEP(*(.init.setup)) \
+ __setup_end = .;
+
+#define INIT_CALLS_LEVEL(level) \
+ __initcall##level##_start = .; \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
+
+#define INIT_CALLS \
+ __initcall_start = .; \
+ KEEP(*(.initcallearly.init)) \
+ INIT_CALLS_LEVEL(0) \
+ INIT_CALLS_LEVEL(1) \
+ INIT_CALLS_LEVEL(2) \
+ INIT_CALLS_LEVEL(3) \
+ INIT_CALLS_LEVEL(4) \
+ INIT_CALLS_LEVEL(5) \
+ INIT_CALLS_LEVEL(rootfs) \
+ INIT_CALLS_LEVEL(6) \
+ INIT_CALLS_LEVEL(7) \
+ __initcall_end = .;
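+
+/*
+ * For orientation (conventional mapping, assumed from the initcall
+ * macros elsewhere): the numeric levels above correspond to pure(0),
+ * core(1), postcore(2), arch(3), subsys(4), fs(5), rootfs, device(6)
+ * and late(7) initcalls, which is why the "rootfs" level is ordered
+ * between levels 5 and 6.
+ */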
+
+#define CON_INITCALL \
+ __con_initcall_start = .; \
+ KEEP(*(.con_initcall.init)) \
+ __con_initcall_end = .;
+
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_TABLE() \
+ . = ALIGN(8); \
+ __kunit_suites_start = .; \
+ KEEP(*(.kunit_test_suites)) \
+ __kunit_suites_end = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS \
+ . = ALIGN(4); \
+ __initramfs_start = .; \
+ KEEP(*(.init.ramfs)) \
+ . = ALIGN(8); \
+ KEEP(*(.init.ramfs.info))
+#else
+#define INIT_RAM_FS
+#endif
+
+/*
+ * Memory encryption operates on a page basis. Since we need to clear
+ * the memory encryption mask for this section, it needs to be aligned
+ * on a page boundary and be a page-size multiple in length.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define PERCPU_DECRYPTED_SECTION \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..decrypted) \
+ . = ALIGN(PAGE_SIZE);
+#else
+#define PERCPU_DECRYPTED_SECTION
+#endif
+
+
+/*
+ * Default discarded sections.
+ *
+ * Some archs want to discard exit text/data at runtime rather than
+ * link time due to cross-section references such as alt instructions,
+ * bug table, eh_frame, etc. DISCARDS must be the last of the output
+ * section definitions so that such archs can place those sections in
+ * earlier section definitions.
+ */
+#ifdef RUNTIME_DISCARD_EXIT
+#define EXIT_DISCARDS
+#else
+#define EXIT_DISCARDS \
+ EXIT_TEXT \
+ EXIT_DATA
+#endif
+
+/*
+ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
+ * -fsanitize=thread produce unwanted sections (.eh_frame
+ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
+ * keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+# ifdef CONFIG_CONSTRUCTORS
+# define SANITIZER_DISCARDS \
+ *(.eh_frame)
+# else
+# define SANITIZER_DISCARDS \
+ *(.init_array) *(.init_array.*) \
+ *(.eh_frame)
+# endif
+#else
+# define SANITIZER_DISCARDS
+#endif
+
+#define COMMON_DISCARDS \
+ SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
+ *(.discard) \
+ *(.discard.*) \
+ *(.modinfo) \
+ /* ld.bfd warns about .gnu.version* even when not emitted */ \
+ *(.gnu.version*) \
+
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_DISCARDS \
+ EXIT_CALL \
+ COMMON_DISCARDS \
+ }
+
+/**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline) \
+ __per_cpu_start = .; \
+ *(.data..percpu..first) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu..read_mostly) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
+ PERCPU_DECRYPTED_SECTION \
+ __per_cpu_end = .;
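+
+/*
+ * Illustrative note (definitions assumed elsewhere): DEFINE_PER_CPU()
+ * variables are expected to land in .data..percpu, while the
+ * _PAGE_ALIGNED, _READ_MOSTLY and _SHARED_ALIGNED variants emit into
+ * the correspondingly named subsections collected above between
+ * __per_cpu_start and __per_cpu_end.
+ */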
+
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies an explicit base address and
+ * all percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank. Be warned that
+ * output PHDR is sticky. If @phdr is specified, the next output
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU_SECTION.
+ */
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
+ __per_cpu_load = .; \
+ .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
+ PERCPU_INPUT(cacheline) \
+ } phdr \
+ . = __per_cpu_load + SIZEOF(.data..percpu);
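+
+/*
+ * Hypothetical usage sketch (parameters invented for illustration): an
+ * architecture that needs the percpu area at a fixed address could put
+ *
+ *	PERCPU_VADDR(L1_CACHE_BYTES, 0, :percpu)
+ *
+ * in its vmlinux.lds.S, together with a matching "percpu PT_LOAD" entry
+ * in its PHDRS block; architectures without such a requirement
+ * typically use PERCPU_SECTION() below instead.
+ */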
+
+/**
+ * PERCPU_SECTION - define output section for percpu area, simple version
+ * @cacheline: cacheline size
+ *
+ * Aligns to PAGE_SIZE and outputs the output section for the percpu
+ * area. This macro doesn't take a @vaddr or @phdr, so __per_cpu_load
+ * and __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu, which is required for the relocatable x86_32 configuration.
+ */
+#define PERCPU_SECTION(cacheline) \
+ . = ALIGN(PAGE_SIZE); \
+ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
+ __per_cpu_load = .; \
+ PERCPU_INPUT(cacheline) \
+ }
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will fit only a subset of the architectures
+ */
+
+
+/*
+ * Writable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so that their
+ * typical alignments match.
+ * A cacheline is typically (always) smaller than PAGE_SIZE, so the
+ * sections with that restriction (or a similar one) are located
+ * before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * Use 0 as page_align if page-aligned data is not used. */
+#define RW_DATA(cacheline, pagealigned, inittask) \
+ . = ALIGN(PAGE_SIZE); \
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { \
+ INIT_TASK_DATA(inittask) \
+ NOSAVE_DATA \
+ PAGE_ALIGNED_DATA(pagealigned) \
+ CACHELINE_ALIGNED_DATA(cacheline) \
+ READ_MOSTLY_DATA(cacheline) \
+ DATA_DATA \
+ CONSTRUCTORS \
+ } \
+ BUG_TABLE \
+
+#define INIT_TEXT_SECTION(inittext_align) \
+ . = ALIGN(inittext_align); \
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
+ _sinittext = .; \
+ INIT_TEXT \
+ _einittext = .; \
+ }
+
+#define INIT_DATA_SECTION(initsetup_align) \
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
+ INIT_DATA \
+ INIT_SETUP(initsetup_align) \
+ INIT_CALLS \
+ CON_INITCALL \
+ INIT_RAM_FS \
+ }
+
+#define BSS_SECTION(sbss_align, bss_align, stop_align) \
+ . = ALIGN(sbss_align); \
+ __bss_start = .; \
+ SBSS(sbss_align) \
+ BSS(bss_align) \
+ . = ALIGN(stop_align); \
+ __bss_stop = .;
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
new file mode 100644
index 000000000..b1a49677f
--- /dev/null
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 000000000..95a1d2141
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
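+
+/*
+ * Worked example: a bytemask of 0xffff (the zero byte is at 0-based
+ * index 2) gives 0xffff * 0x0001020304050608 = 0x020202020202f9f8,
+ * whose top byte, and hence the value returned after ">> 56", is 2.
+ */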
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
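+
+/*
+ * Illustrative usage sketch (caller side; read_word_at_a_time() is an
+ * assumed helper): a NUL scan proceeds one word at a time, roughly:
+ *
+ *	const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;
+ *	unsigned long val, data;
+ *	size_t off = 0;
+ *
+ *	for (;;) {
+ *		val = read_word_at_a_time(addr + off);
+ *		if (has_zero(val, &data, &c)) {
+ *			data = prep_zero_mask(val, data, &c);
+ *			data = create_zero_mask(data);
+ *			return off + find_zero(data);	(offset of the NUL)
+ *		}
+ *		off += sizeof(unsigned long);
+ *	}
+ */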
+
+#endif /* __BIG_ENDIAN */
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
new file mode 100644
index 000000000..44509d48f
--- /dev/null
+++ b/include/asm-generic/xor.h
@@ -0,0 +1,738 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/asm-generic/xor.h
+ *
+ * Generic optimized RAID-5 checksumming functions.
+ */
+
+#include <linux/prefetch.h>
+
+static void
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static struct xor_block_template xor_block_8regs = {
+ .name = "8regs",
+ .do_2 = xor_8regs_2,
+ .do_3 = xor_8regs_3,
+ .do_4 = xor_8regs_4,
+ .do_5 = xor_8regs_5,
+};
+
+static struct xor_block_template xor_block_32regs = {
+ .name = "32regs",
+ .do_2 = xor_32regs_2,
+ .do_3 = xor_32regs_3,
+ .do_4 = xor_32regs_4,
+ .do_5 = xor_32regs_5,
+};
+
+static struct xor_block_template xor_block_8regs_p __maybe_unused = {
+ .name = "8regs_prefetch",
+ .do_2 = xor_8regs_p_2,
+ .do_3 = xor_8regs_p_3,
+ .do_4 = xor_8regs_p_4,
+ .do_5 = xor_8regs_p_5,
+};
+
+static struct xor_block_template xor_block_32regs_p __maybe_unused = {
+ .name = "32regs_prefetch",
+ .do_2 = xor_32regs_p_2,
+ .do_3 = xor_32regs_p_3,
+ .do_4 = xor_32regs_p_4,
+ .do_5 = xor_32regs_p_5,
+};
+
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ } while (0)
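+
+/*
+ * Usage note (assumed consumer, not defined in this header): the XOR
+ * calibration code is expected to run XOR_TRY_TEMPLATES during boot,
+ * benchmarking each template with xor_speed() and selecting the
+ * fastest one for RAID-5 parity computation.
+ */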