Diffstat (include/asm-generic)
-rw-r--r--  include/asm-generic/4level-fixup.h  40
-rw-r--r--  include/asm-generic/5level-fixup.h  43
-rw-r--r--  include/asm-generic/asm-offsets.h  1
-rw-r--r--  include/asm-generic/asm-prototypes.h  14
-rw-r--r--  include/asm-generic/atomic-instrumented.h  467
-rw-r--r--  include/asm-generic/atomic-long.h  269
-rw-r--r--  include/asm-generic/atomic.h  202
-rw-r--r--  include/asm-generic/atomic64.h  60
-rw-r--r--  include/asm-generic/audit_change_attr.h  33
-rw-r--r--  include/asm-generic/audit_dir_write.h  38
-rw-r--r--  include/asm-generic/audit_read.h  14
-rw-r--r--  include/asm-generic/audit_signal.h  3
-rw-r--r--  include/asm-generic/audit_write.h  25
-rw-r--r--  include/asm-generic/barrier.h  265
-rw-r--r--  include/asm-generic/bitops.h  38
-rw-r--r--  include/asm-generic/bitops/__ffs.h  44
-rw-r--r--  include/asm-generic/bitops/__fls.h  44
-rw-r--r--  include/asm-generic/bitops/arch_hweight.h  26
-rw-r--r--  include/asm-generic/bitops/atomic.h  68
-rw-r--r--  include/asm-generic/bitops/builtin-__ffs.h  16
-rw-r--r--  include/asm-generic/bitops/builtin-__fls.h  16
-rw-r--r--  include/asm-generic/bitops/builtin-ffs.h  18
-rw-r--r--  include/asm-generic/bitops/builtin-fls.h  17
-rw-r--r--  include/asm-generic/bitops/const_hweight.h  44
-rw-r--r--  include/asm-generic/bitops/ext2-atomic-setbit.h  12
-rw-r--r--  include/asm-generic/bitops/ext2-atomic.h  27
-rw-r--r--  include/asm-generic/bitops/ffs.h  42
-rw-r--r--  include/asm-generic/bitops/ffz.h  13
-rw-r--r--  include/asm-generic/bitops/find.h  83
-rw-r--r--  include/asm-generic/bitops/fls.h  42
-rw-r--r--  include/asm-generic/bitops/fls64.h  37
-rw-r--r--  include/asm-generic/bitops/hweight.h  8
-rw-r--r--  include/asm-generic/bitops/le.h  98
-rw-r--r--  include/asm-generic/bitops/lock.h  91
-rw-r--r--  include/asm-generic/bitops/non-atomic.h  109
-rw-r--r--  include/asm-generic/bitops/sched.h  32
-rw-r--r--  include/asm-generic/bitsperlong.h  26
-rw-r--r--  include/asm-generic/bug.h  254
-rw-r--r--  include/asm-generic/bugs.h  11
-rw-r--r--  include/asm-generic/cache.h  13
-rw-r--r--  include/asm-generic/cacheflush.h  35
-rw-r--r--  include/asm-generic/checksum.h  88
-rw-r--r--  include/asm-generic/cmpxchg-local.h  68
-rw-r--r--  include/asm-generic/cmpxchg.h  109
-rw-r--r--  include/asm-generic/compat.h  3
-rw-r--r--  include/asm-generic/current.h  10
-rw-r--r--  include/asm-generic/delay.h  45
-rw-r--r--  include/asm-generic/device.h  15
-rw-r--r--  include/asm-generic/div64.h  249
-rw-r--r--  include/asm-generic/dma-contiguous.h  10
-rw-r--r--  include/asm-generic/dma-mapping.h  19
-rw-r--r--  include/asm-generic/dma.h  16
-rw-r--r--  include/asm-generic/early_ioremap.h  53
-rw-r--r--  include/asm-generic/emergency-restart.h  10
-rw-r--r--  include/asm-generic/error-injection.h  35
-rw-r--r--  include/asm-generic/exec.h  19
-rw-r--r--  include/asm-generic/export.h  93
-rw-r--r--  include/asm-generic/extable.h  27
-rw-r--r--  include/asm-generic/fb.h  13
-rw-r--r--  include/asm-generic/fixmap.h  103
-rw-r--r--  include/asm-generic/ftrace.h  16
-rw-r--r--  include/asm-generic/futex.h  151
-rw-r--r--  include/asm-generic/getorder.h  52
-rw-r--r--  include/asm-generic/gpio.h  172
-rw-r--r--  include/asm-generic/hardirq.h  22
-rw-r--r--  include/asm-generic/hugetlb.h  43
-rw-r--r--  include/asm-generic/hw_irq.h  9
-rw-r--r--  include/asm-generic/ide_iops.h  39
-rw-r--r--  include/asm-generic/int-ll64.h  47
-rw-r--r--  include/asm-generic/io.h  1140
-rw-r--r--  include/asm-generic/ioctl.h  18
-rw-r--r--  include/asm-generic/iomap.h  94
-rw-r--r--  include/asm-generic/irq.h  19
-rw-r--r--  include/asm-generic/irq_regs.h  37
-rw-r--r--  include/asm-generic/irq_work.h  11
-rw-r--r--  include/asm-generic/irqflags.h  67
-rw-r--r--  include/asm-generic/kdebug.h  10
-rw-r--r--  include/asm-generic/kmap_types.h  11
-rw-r--r--  include/asm-generic/kprobes.h  26
-rw-r--r--  include/asm-generic/kvm_para.h  32
-rw-r--r--  include/asm-generic/linkage.h  8
-rw-r--r--  include/asm-generic/local.h  56
-rw-r--r--  include/asm-generic/local64.h  97
-rw-r--r--  include/asm-generic/mcs_spinlock.h  13
-rw-r--r--  include/asm-generic/memory_model.h  86
-rw-r--r--  include/asm-generic/mm-arch-hooks.h  16
-rw-r--r--  include/asm-generic/mm_hooks.h  37
-rw-r--r--  include/asm-generic/mmu.h  20
-rw-r--r--  include/asm-generic/mmu_context.h  46
-rw-r--r--  include/asm-generic/module.h  49
-rw-r--r--  include/asm-generic/msi.h  33
-rw-r--r--  include/asm-generic/page.h  101
-rw-r--r--  include/asm-generic/param.h  11
-rw-r--r--  include/asm-generic/parport.h  24
-rw-r--r--  include/asm-generic/pci.h  17
-rw-r--r--  include/asm-generic/pci_iomap.h  55
-rw-r--r--  include/asm-generic/percpu.h  448
-rw-r--r--  include/asm-generic/pgalloc.h  13
-rw-r--r--  include/asm-generic/pgtable-nop4d-hack.h  63
-rw-r--r--  include/asm-generic/pgtable-nop4d.h  58
-rw-r--r--  include/asm-generic/pgtable-nopmd.h  70
-rw-r--r--  include/asm-generic/pgtable-nopud.h  68
-rw-r--r--  include/asm-generic/pgtable.h  1155
-rw-r--r--  include/asm-generic/preempt.h  88
-rw-r--r--  include/asm-generic/ptrace.h  74
-rw-r--r--  include/asm-generic/qrwlock.h  138
-rw-r--r--  include/asm-generic/qrwlock_types.h  34
-rw-r--r--  include/asm-generic/qspinlock.h  123
-rw-r--r--  include/asm-generic/qspinlock_types.h  112
-rw-r--r--  include/asm-generic/resource.h  31
-rw-r--r--  include/asm-generic/rwsem.h  140
-rw-r--r--  include/asm-generic/seccomp.h  46
-rw-r--r--  include/asm-generic/sections.h  147
-rw-r--r--  include/asm-generic/segment.h  9
-rw-r--r--  include/asm-generic/serial.h  14
-rw-r--r--  include/asm-generic/set_memory.h  13
-rw-r--r--  include/asm-generic/signal.h  15
-rw-r--r--  include/asm-generic/simd.h  15
-rw-r--r--  include/asm-generic/sizes.h  2
-rw-r--r--  include/asm-generic/spinlock.h  12
-rw-r--r--  include/asm-generic/statfs.h  8
-rw-r--r--  include/asm-generic/string.h  10
-rw-r--r--  include/asm-generic/switch_to.h  30
-rw-r--r--  include/asm-generic/syscall.h  157
-rw-r--r--  include/asm-generic/syscalls.h  29
-rw-r--r--  include/asm-generic/termios-base.h  78
-rw-r--r--  include/asm-generic/termios.h  108
-rw-r--r--  include/asm-generic/timex.h  23
-rw-r--r--  include/asm-generic/tlb.h  317
-rw-r--r--  include/asm-generic/tlbflush.h  21
-rw-r--r--  include/asm-generic/topology.h  77
-rw-r--r--  include/asm-generic/trace_clock.h  17
-rw-r--r--  include/asm-generic/uaccess.h  228
-rw-r--r--  include/asm-generic/unaligned.h  36
-rw-r--r--  include/asm-generic/unistd.h  13
-rw-r--r--  include/asm-generic/user.h  8
-rw-r--r--  include/asm-generic/vga.h  25
-rw-r--r--  include/asm-generic/vmlinux.lds.h  1001
-rw-r--r--  include/asm-generic/vtime.h  1
-rw-r--r--  include/asm-generic/word-at-a-time.h  121
-rw-r--r--  include/asm-generic/xor.h  718
141 files changed, 12322 insertions, 0 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
new file mode 100644
index 000000000..e3667c9a3
--- /dev/null
+++ b/include/asm-generic/4level-fixup.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _4LEVEL_FIXUP_H
+#define _4LEVEL_FIXUP_H
+
+#define __ARCH_HAS_4LEVEL_HACK
+#define __PAGETABLE_PUD_FOLDED 1
+
+#define PUD_SHIFT PGDIR_SHIFT
+#define PUD_SIZE PGDIR_SIZE
+#define PUD_MASK PGDIR_MASK
+#define PTRS_PER_PUD 1
+
+#define pud_t pgd_t
+
+#define pmd_alloc(mm, pud, address) \
+ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+ NULL: pmd_offset(pud, address))
+
+#define pud_offset(pgd, start) (pgd)
+#define pud_none(pud) 0
+#define pud_bad(pud) 0
+#define pud_present(pud) 1
+#define pud_ERROR(pud) do { } while (0)
+#define pud_clear(pud) pgd_clear(pud)
+#define pud_val(pud) pgd_val(pud)
+#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
+#define pud_page(pud) pgd_page(pud)
+#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
+
+#undef pud_free_tlb
+#define pud_free_tlb(tlb, x, addr) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, addr) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#include <asm-generic/5level-fixup.h>
+
+#endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
new file mode 100644
index 000000000..73474bb52
--- /dev/null
+++ b/include/asm-generic/5level-fixup.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _5LEVEL_FIXUP_H
+#define _5LEVEL_FIXUP_H
+
+#define __ARCH_HAS_5LEVEL_HACK
+#define __PAGETABLE_P4D_FOLDED 1
+
+#define P4D_SHIFT PGDIR_SHIFT
+#define P4D_SIZE PGDIR_SIZE
+#define P4D_MASK PGDIR_MASK
+#define MAX_PTRS_PER_P4D 1
+#define PTRS_PER_P4D 1
+
+#define p4d_t pgd_t
+
+#define pud_alloc(mm, p4d, address) \
+ ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
+ NULL : pud_offset(p4d, address))
+
+#define p4d_alloc(mm, pgd, address) (pgd)
+#define p4d_offset(pgd, start) (pgd)
+#define p4d_none(p4d) 0
+#define p4d_bad(p4d) 0
+#define p4d_present(p4d) 1
+#define p4d_ERROR(p4d) do { } while (0)
+#define p4d_clear(p4d) pgd_clear(p4d)
+#define p4d_val(p4d) pgd_val(p4d)
+#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_page(p4d) pgd_page(p4d)
+#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
+
+#define __p4d(x) __pgd(x)
+#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
+
+#undef p4d_free_tlb
+#define p4d_free_tlb(tlb, x, addr) do { } while (0)
+#define p4d_free(mm, x) do { } while (0)
+#define __p4d_free_tlb(tlb, x, addr) do { } while (0)
+
+#undef p4d_addr_end
+#define p4d_addr_end(addr, end) (end)
+
+#endif
diff --git a/include/asm-generic/asm-offsets.h b/include/asm-generic/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
new file mode 100644
index 000000000..2fa2bc208
--- /dev/null
+++ b/include/asm-generic/asm-prototypes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/bitops.h>
+#undef __memset
+extern void *__memset(void *, int, __kernel_size_t);
+#undef __memcpy
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+#undef __memmove
+extern void *__memmove(void *, const void *, __kernel_size_t);
+#undef memset
+extern void *memset(void *, int, __kernel_size_t);
+#undef memcpy
+extern void *memcpy(void *, const void *, __kernel_size_t);
+#undef memmove
+extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
new file mode 100644
index 000000000..0d4b1d3db
--- /dev/null
+++ b/include/asm-generic/atomic-instrumented.h
@@ -0,0 +1,467 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline s64 atomic64_read(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline int atomic_xchg(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+#ifdef arch_atomic_try_cmpxchg
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+#endif
+
+#ifdef arch_atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#endif
+
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+#endif
+
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+#endif
+
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
+static __always_inline void atomic_inc(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+#endif
+
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
+static __always_inline void atomic64_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+#endif
+
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
+static __always_inline void atomic_dec(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+#endif
+
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
+static __always_inline void atomic64_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+#endif
+
+static __always_inline void atomic_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline void atomic_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline void atomic_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline void atomic_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline void atomic_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
+static __always_inline int atomic_inc_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+#endif
+
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
+static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+#endif
+
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
+static __always_inline int atomic_dec_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+#endif
+
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
+static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+#endif
+
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+#endif
+
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+#endif
+
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+#endif
+
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
+static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+#endif
+
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+#endif
+
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
+static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+#endif
+
+static __always_inline int atomic_add_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+#endif
+
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
+static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+#endif
+
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+#endif
+
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
+static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+#endif
+
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
+})
+
+#define sync_cmpxchg(ptr, old, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
+})
+
+#define cmpxchg_local(ptr, old, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
+})
+
+#define cmpxchg64(ptr, old, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
+})
+
+#define cmpxchg64_local(ptr, old, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
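/*
 * Illustrative sketch (not part of this patch): how an architecture opts in
 * to the instrumented wrappers above. The arch's atomic.h defines the
 * arch_-prefixed primitives and includes asm-generic/atomic-instrumented.h
 * last. "foo" is a made-up architecture and the bodies are placeholders,
 * not a real implementation.
 */
/* arch/foo/include/asm/atomic.h */
#ifndef _ASM_FOO_ATOMIC_H
#define _ASM_FOO_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);	/* a real arch may use its own insn */
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

/* ... every other arch_atomic*_*() and arch_*xchg*() op goes here ... */

/* Must come last so atomic_read() etc. wrap the arch_ ops defined above. */
#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_FOO_ATOMIC_H */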
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
new file mode 100644
index 000000000..87d14476e
--- /dev/null
+++ b/include/asm-generic/atomic-long.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ATOMIC_LONG_H
+#define _ASM_GENERIC_ATOMIC_LONG_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter
+ *
+ * Allows to provide arch independent atomic definitions without the need to
+ * edit all arch specific atomic.h files.
+ */
+
+#include <asm/types.h>
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x) atomic64 ## x
+#define ATOMIC_LONG_TYPE s64
+
+#else
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define ATOMIC_LONG_PFX(x) atomic ## x
+#define ATOMIC_LONG_TYPE int
+
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo) \
+static inline long atomic_long_read##mo(const atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
+}
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
+
+#undef ATOMIC_LONG_READ_OP
+
+#define ATOMIC_LONG_SET_OP(mo) \
+static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_set##mo)(v, i); \
+}
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+
+#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+
+
+#define atomic_long_xchg_relaxed(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+ (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+
+static __always_inline void atomic_long_inc(atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ ATOMIC_LONG_PFX(_inc)(v);
+}
+
+static __always_inline void atomic_long_dec(atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ ATOMIC_LONG_PFX(_dec)(v);
+}
+
+#define ATOMIC_LONG_FETCH_OP(op, mo) \
+static inline long \
+atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
+}
+
+ATOMIC_LONG_FETCH_OP(add, )
+ATOMIC_LONG_FETCH_OP(add, _relaxed)
+ATOMIC_LONG_FETCH_OP(add, _acquire)
+ATOMIC_LONG_FETCH_OP(add, _release)
+ATOMIC_LONG_FETCH_OP(sub, )
+ATOMIC_LONG_FETCH_OP(sub, _relaxed)
+ATOMIC_LONG_FETCH_OP(sub, _acquire)
+ATOMIC_LONG_FETCH_OP(sub, _release)
+ATOMIC_LONG_FETCH_OP(and, )
+ATOMIC_LONG_FETCH_OP(and, _relaxed)
+ATOMIC_LONG_FETCH_OP(and, _acquire)
+ATOMIC_LONG_FETCH_OP(and, _release)
+ATOMIC_LONG_FETCH_OP(andnot, )
+ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
+ATOMIC_LONG_FETCH_OP(andnot, _acquire)
+ATOMIC_LONG_FETCH_OP(andnot, _release)
+ATOMIC_LONG_FETCH_OP(or, )
+ATOMIC_LONG_FETCH_OP(or, _relaxed)
+ATOMIC_LONG_FETCH_OP(or, _acquire)
+ATOMIC_LONG_FETCH_OP(or, _release)
+ATOMIC_LONG_FETCH_OP(xor, )
+ATOMIC_LONG_FETCH_OP(xor, _relaxed)
+ATOMIC_LONG_FETCH_OP(xor, _acquire)
+ATOMIC_LONG_FETCH_OP(xor, _release)
+
+#undef ATOMIC_LONG_FETCH_OP
+
+#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \
+static inline long \
+atomic_long_fetch_##op##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \
+}
+
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_FETCH_INC_DEC_OP
+
+#define ATOMIC_LONG_OP(op) \
+static __always_inline void \
+atomic_long_##op(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_##op)(i, v); \
+}
+
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(andnot)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+
+#undef ATOMIC_LONG_OP
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ return ATOMIC_LONG_PFX(_dec_and_test)(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ return ATOMIC_LONG_PFX(_inc_and_test)(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ return ATOMIC_LONG_PFX(_add_negative)(i, v);
+}
+
+#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
+}
+ATOMIC_LONG_INC_DEC_OP(inc,)
+ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_INC_DEC_OP(dec,)
+ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_INC_DEC_OP
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+
+ return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) \
+ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+
+#define atomic_long_cond_read_relaxed(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+#define atomic_long_cond_read_acquire(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
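/*
 * Illustrative sketch (not part of this patch): callers use atomic_long_t
 * without caring about word size; with BITS_PER_LONG == 64 every helper
 * above is a cast-and-forward to the matching atomic64_*() operation (and
 * to atomic_*() on 32-bit). The counter below is a made-up example.
 */
#include <linux/atomic.h>

static atomic_long_t nr_mappings = ATOMIC_LONG_INIT(0);

static inline void mapping_created(void)
{
	atomic_long_inc(&nr_mappings);		/* atomic64_inc() on 64-bit */
}

static inline long mappings_in_use(void)
{
	return atomic_long_read(&nr_mappings);
}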
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 000000000..13324aa82
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,202 @@
+/*
+ * Generic C implementation of atomic counter operations. Usable on
+ * UP systems only. Do not include in machine independent code.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_ATOMIC_H
+#define __ASM_GENERIC_ATOMIC_H
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
+ * use smp_mb__{before,after}_atomic() where ordering is required.
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
+#ifdef CONFIG_SMP
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c c_op i; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c; \
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = (v->counter = v->counter c_op i); \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = v->counter; \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#endif
+
+#ifndef atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#endif
+
+#ifndef atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#endif
+
+#ifndef atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#endif
+
+#ifndef atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+#endif
+
+#ifndef atomic_and
+ATOMIC_OP(and, &)
+#endif
+
+#ifndef atomic_or
+ATOMIC_OP(or, |)
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
+#endif
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#ifndef atomic_read
+#define atomic_read(v) READ_ONCE((v)->counter)
+#endif
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#include <linux/irqflags.h>
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* __ASM_GENERIC_ATOMIC_H */
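/*
 * Illustrative sketch (not part of this patch): typical use of the generic
 * atomic_t API filled in above. The names and the refcount-style helpers are
 * made up for illustration; real kernel code would normally use refcount_t
 * for this pattern.
 */
#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t thing_users = ATOMIC_INIT(1);

static inline void thing_get(void)
{
	atomic_add(1, &thing_users);		/* no barrier implied */
}

static inline bool thing_put(void)
{
	/* *_return variants imply a full memory barrier */
	return atomic_sub_return(1, &thing_users) == 0;
}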
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
new file mode 100644
index 000000000..97b28b7f1
--- /dev/null
+++ b/include/asm-generic/atomic64.h
@@ -0,0 +1,60 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC_ATOMIC64_H
+#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
+
+typedef struct {
+ long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+extern long long atomic64_read(const atomic64_t *v);
+extern void atomic64_set(atomic64_t *v, long long i);
+
+#define atomic64_set_release(v, i) atomic64_set((v), (i))
+
+#define ATOMIC64_OP(op) \
+extern void atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op) \
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_FETCH_OP(op) \
+extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
+extern long long atomic64_xchg(atomic64_t *v, long long new);
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+
+#endif /* _ASM_GENERIC_ATOMIC64_H */
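/*
 * Illustrative sketch (not part of this patch): usage of the spinlock-backed
 * atomic64_t is the same as on architectures with native 64-bit atomics.
 * The byte counter below is a made-up example.
 */
#include <linux/atomic.h>

static atomic64_t bytes_received = ATOMIC64_INIT(0);

static inline void account_rx(unsigned int len)
{
	atomic64_add(len, &bytes_received);	/* serialized by a hashed spinlock */
}

static inline long long total_rx(void)
{
	return atomic64_read(&bytes_received);
}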
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
new file mode 100644
index 000000000..331670807
--- /dev/null
+++ b/include/asm-generic/audit_change_attr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_chmod
+__NR_chmod,
+#endif
+__NR_fchmod,
+#ifdef __NR_chown
+__NR_chown,
+__NR_lchown,
+#endif
+#ifdef __NR_fchown
+__NR_fchown,
+#endif
+__NR_setxattr,
+__NR_lsetxattr,
+__NR_fsetxattr,
+__NR_removexattr,
+__NR_lremovexattr,
+__NR_fremovexattr,
+#ifdef __NR_fchownat
+__NR_fchownat,
+__NR_fchmodat,
+#endif
+#ifdef __NR_chown32
+__NR_chown32,
+__NR_fchown32,
+__NR_lchown32,
+#endif
+#ifdef __NR_link
+__NR_link,
+#endif
+#ifdef __NR_linkat
+__NR_linkat,
+#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
new file mode 100644
index 000000000..dd5a9dd7a
--- /dev/null
+++ b/include/asm-generic/audit_dir_write.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_rename
+__NR_rename,
+#endif
+#ifdef __NR_mkdir
+__NR_mkdir,
+#endif
+#ifdef __NR_rmdir
+__NR_rmdir,
+#endif
+#ifdef __NR_creat
+__NR_creat,
+#endif
+#ifdef __NR_link
+__NR_link,
+#endif
+#ifdef __NR_unlink
+__NR_unlink,
+#endif
+#ifdef __NR_symlink
+__NR_symlink,
+#endif
+#ifdef __NR_mknod
+__NR_mknod,
+#endif
+#ifdef __NR_mkdirat
+__NR_mkdirat,
+__NR_mknodat,
+__NR_unlinkat,
+#ifdef __NR_renameat
+__NR_renameat,
+#endif
+__NR_linkat,
+__NR_symlinkat,
+#endif
+#ifdef __NR_renameat2
+__NR_renameat2,
+#endif
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
new file mode 100644
index 000000000..7bb7b5a83
--- /dev/null
+++ b/include/asm-generic/audit_read.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef __NR_readlink
+__NR_readlink,
+#endif
+__NR_quotactl,
+__NR_listxattr,
+__NR_llistxattr,
+__NR_flistxattr,
+__NR_getxattr,
+__NR_lgetxattr,
+__NR_fgetxattr,
+#ifdef __NR_readlinkat
+__NR_readlinkat,
+#endif
diff --git a/include/asm-generic/audit_signal.h b/include/asm-generic/audit_signal.h
new file mode 100644
index 000000000..6feab7f18
--- /dev/null
+++ b/include/asm-generic/audit_signal.h
@@ -0,0 +1,3 @@
+__NR_kill,
+__NR_tgkill,
+__NR_tkill,
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
new file mode 100644
index 000000000..f9f1d0ae1
--- /dev/null
+++ b/include/asm-generic/audit_write.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm-generic/audit_dir_write.h>
+__NR_acct,
+#ifdef __NR_swapon
+__NR_swapon,
+#endif
+__NR_quotactl,
+#ifdef __NR_truncate
+__NR_truncate,
+#endif
+#ifdef __NR_truncate64
+__NR_truncate64,
+#endif
+#ifdef __NR_ftruncate
+__NR_ftruncate,
+#endif
+#ifdef __NR_ftruncate64
+__NR_ftruncate64,
+#endif
+#ifdef __NR_bind
+__NR_bind, /* bind can affect fs object only in one way... */
+#endif
+#ifdef __NR_fallocate
+__NR_fallocate,
+#endif
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
new file mode 100644
index 000000000..2cafdbb9a
--- /dev/null
+++ b/include/asm-generic/barrier.h
@@ -0,0 +1,265 @@
+/*
+ * Generic barrier definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_BARRIER_H
+#define __ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop() asm volatile ("nop")
+#endif
+
+/*
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
+ *
+ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+#ifndef mb
+#define mb() barrier()
+#endif
+
+#ifndef rmb
+#define rmb() mb()
+#endif
+
+#ifndef wmb
+#define wmb() mb()
+#endif
+
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+
+#ifndef dma_wmb
+#define dma_wmb() wmb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends() do { } while (0)
+#endif
+
+#ifndef __smp_mb
+#define __smp_mb() mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb() rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb() wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends() read_barrier_depends()
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_mb
+#define smp_mb() __smp_mb()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb() __smp_rmb()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb() __smp_wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
+#define smp_read_barrier_depends() __smp_read_barrier_depends()
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_mb
+#define smp_mb() barrier()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb() barrier()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb() barrier()
+#endif
+
+#ifndef smp_read_barrier_depends
+#define smp_read_barrier_depends() do { } while (0)
+#endif
+
+#endif /* CONFIG_SMP */
+
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic() __smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic() __smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __smp_mb(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __smp_mb(); \
+ ___p1; \
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() barrier()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() barrier()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+#endif
+
+#endif /* CONFIG_SMP */
+
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic() __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
+
+/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#ifndef smp_acquire__after_ctrl_dep
+#define smp_acquire__after_ctrl_dep() smp_rmb()
+#endif
+
+/**
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using READ_ONCE() on the condition variable.
+ *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ */
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
+ typeof(ptr) __PTR = (ptr); \
+ typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = READ_ONCE(*__PTR); \
+ if (cond_expr) \
+ break; \
+ cpu_relax(); \
+ } \
+ VAL; \
+})
+#endif
+
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({ \
+ typeof(*ptr) _val; \
+ _val = smp_cond_load_relaxed(ptr, cond_expr); \
+ smp_acquire__after_ctrl_dep(); \
+ _val; \
+})
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_GENERIC_BARRIER_H */
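/*
 * Illustrative sketch (not part of this patch): the message-passing pattern
 * that smp_store_release()/smp_load_acquire() and smp_cond_load_acquire()
 * above are meant for. The struct and function names are made up.
 */
#include <linux/compiler.h>
#include <asm/barrier.h>

struct mailbox {
	int payload;
	int ready;			/* 0 = empty, 1 = payload valid */
};

/* Producer: make the payload visible before the flag. */
static void mailbox_post(struct mailbox *m, int val)
{
	m->payload = val;
	smp_store_release(&m->ready, 1);
}

/* Consumer: spin until the flag is set; the ACQUIRE pairs with the release. */
static int mailbox_wait(struct mailbox *m)
{
	smp_cond_load_acquire(&m->ready, VAL == 1);
	return m->payload;
}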
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
new file mode 100644
index 000000000..bfc96bf66
--- /dev/null
+++ b/include/asm-generic/bitops.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITOPS_H
+#define __ASM_GENERIC_BITOPS_H
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents. You should
+ * recode these in the native assembly language, if at all possible.
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 000000000..39e56e1c7
--- /dev/null
+++ b/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS___FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
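/*
 * Illustrative sketch (not part of this patch): a few worked values for the
 * binary-search reduction above, plus the usual "find first free slot" idiom
 * built on __ffs() via ffz(). Undefined for an all-zero word, as the
 * kerneldoc notes.
 *
 *   __ffs(0x01) == 0        lowest set bit is bit 0
 *   __ffs(0x40) == 6        0x40 == 1UL << 6
 *   __ffs(1UL << 63) == 63  on a 64-bit long
 */
#include <linux/bitops.h>

static unsigned long first_free_slot(unsigned long alloc_map)
{
	/* all slots taken? report BITS_PER_LONG rather than call ffz(~0UL) */
	return (~alloc_map) ? ffz(alloc_map) : BITS_PER_LONG;
}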
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
new file mode 100644
index 000000000..03f721a8a
--- /dev/null
+++ b/include/asm-generic/bitops/__fls.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-1))))
+ num -= 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000..c2705e1d2
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
new file mode 100644
index 000000000..dd90c9792
--- /dev/null
+++ b/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/*
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
+ */
+
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
+
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
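/*
 * Illustrative sketch (not part of this patch): the common flag-word pattern
 * built on the atomic bitops above. The device name and bit numbers are
 * made up.
 */
#include <linux/bitops.h>
#include <linux/types.h>

#define MYDEV_F_BUSY	0
#define MYDEV_F_ERROR	1

static unsigned long mydev_flags;	/* one word of atomically updated flags */

static bool mydev_try_claim(void)
{
	/* the old bit value comes back: non-zero means somebody beat us to it */
	return !test_and_set_bit(MYDEV_F_BUSY, &mydev_flags);
}

static void mydev_release(void)
{
	clear_bit(MYDEV_F_BUSY, &mydev_flags);
}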
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
new file mode 100644
index 000000000..87024da44
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FFS_H_
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ return __builtin_ctzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
new file mode 100644
index 000000000..43a5aa9af
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN___FLS_H_
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
new file mode 100644
index 000000000..458c85ebc
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines (man ffs), and therefore differs in spirit from ffz().
+ */
+static __always_inline int ffs(int x)
+{
+ return __builtin_ffs(x);
+}
+
+#endif
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
new file mode 100644
index 000000000..62daf9409
--- /dev/null
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+#define _ASM_GENERIC_BITOPS_BUILTIN_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __always_inline int fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+#endif
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000..149faeeee
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w) \
+ ((unsigned int) \
+ ((!!((w) & (1ULL << 0))) + \
+ (!!((w) & (1ULL << 1))) + \
+ (!!((w) & (1ULL << 2))) + \
+ (!!((w) & (1ULL << 3))) + \
+ (!!((w) & (1ULL << 4))) + \
+ (!!((w) & (1ULL << 5))) + \
+ (!!((w) & (1ULL << 6))) + \
+ (!!((w) & (1ULL << 7)))))
+
+#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w) HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
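The __const_hweightN() macros above are simply the population count unrolled into one !! test per bit, so the whole expression folds away at compile time for constant arguments. A small userspace sketch of the same summation, cross-checked against the compiler's popcount builtin (hweight8_sketch is an invented name):

/* Userspace sketch: bit-by-bit population count, as the macros expand to. */
#include <assert.h>
#include <stdint.h>

static unsigned int hweight8_sketch(uint8_t w)
{
        unsigned int sum = 0, i;

        for (i = 0; i < 8; i++)         /* one !!((w) & (1 << i)) term per bit */
                sum += !!(w & (1u << i));
        return sum;
}

int main(void)
{
        assert(hweight8_sketch(0xff) == 8);
        assert(hweight8_sketch(0xa5) == __builtin_popcount(0xa5));
        return 0;
}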
diff --git a/include/asm-generic/bitops/ext2-atomic-setbit.h b/include/asm-generic/bitops/ext2-atomic-setbit.h
new file mode 100644
index 000000000..b041cbf0d
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic-setbit.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
+
+/*
+ * Atomic bitops based version of ext2 atomic bitops
+ */
+
+#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr)
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 000000000..0cfc3180b
--- /dev/null
+++ b/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+
+/*
+ * Spinlock based version of ext2 atomic bitops
+ */
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = __test_and_set_bit_le(nr, addr); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = __test_and_clear_bit_le(nr, addr); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
new file mode 100644
index 000000000..e81868b2c
--- /dev/null
+++ b/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler
+ * builtin ffs routines (man ffs), and therefore differs
+ * in spirit from ffz().
+ */
+static inline int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
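The generic ffs() above is a branch cascade that halves the search window at each step: if the low 16 bits are empty the answer lies in the upper half, and so on down to a single bit. A userspace sketch of the same cascade, cross-checked against the libc ffs() it is documented to match (ffs_sketch is an invented name):

/* Userspace sketch of the halving cascade used by the generic ffs(). */
#include <assert.h>
#include <strings.h>    /* libc ffs(), used only for cross-checking */

static int ffs_sketch(int x)
{
        unsigned int v = (unsigned int)x;
        int r = 1;

        if (!v)
                return 0;
        if (!(v & 0xffff)) { v >>= 16; r += 16; }   /* set bit is in the high half */
        if (!(v & 0xff))   { v >>= 8;  r += 8;  }
        if (!(v & 0xf))    { v >>= 4;  r += 4;  }
        if (!(v & 0x3))    { v >>= 2;  r += 2;  }
        if (!(v & 0x1))    { r += 1; }
        return r;
}

int main(void)
{
        int i;

        assert(ffs_sketch(0) == 0);
        for (i = 0; i < 32; i++)
                assert(ffs_sketch((int)(1u << i)) == ffs((int)(1u << i)));
        return 0;
}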
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h
new file mode 100644
index 000000000..0d010085f
--- /dev/null
+++ b/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
new file mode 100644
index 000000000..8a1ee1001
--- /dev/null
+++ b/include/asm-generic/bitops/find.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FIND_H_
+#define _ASM_GENERIC_BITOPS_FIND_H_
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset);
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+extern unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+ unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#ifndef find_first_bit
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#endif
+#ifndef find_first_zero_bit
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+#endif
+
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
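The contract spelled out above, returning the index of the next matching bit or @size when there is none, is what makes the usual "walk every set bit" loop terminate cleanly. A hedged userspace sketch of that contract and loop over a single-word bitmap (find_next_bit_sketch is an invented stand-in for the out-of-line library implementation):

/* Userspace sketch of the find_next_bit() contract on a one-word bitmap. */
#include <stdio.h>

static unsigned long find_next_bit_sketch(const unsigned long *addr,
                                          unsigned long size,
                                          unsigned long offset)
{
        unsigned long bit;

        for (bit = offset; bit < size; bit++)
                if (addr[bit / (8 * sizeof(unsigned long))] &
                    (1UL << (bit % (8 * sizeof(unsigned long)))))
                        return bit;
        return size;    /* no set bit at or after 'offset' */
}

int main(void)
{
        unsigned long map = 0x122;                      /* bits 1, 5 and 8 set */
        unsigned long nbits = 8 * sizeof(map);
        unsigned long bit;

        for (bit = find_next_bit_sketch(&map, nbits, 0);
             bit < nbits;
             bit = find_next_bit_sketch(&map, nbits, bit + 1))
                printf("bit %lu is set\n", bit);        /* prints 1, 5, 8 */
        return 0;
}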
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
new file mode 100644
index 000000000..753aecaab
--- /dev/null
+++ b/include/asm-generic/bitops/fls.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __always_inline int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
new file mode 100644
index 000000000..866f2b230
--- /dev/null
+++ b/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
+static __always_inline int fls64(__u64 x)
+{
+ __u32 h = x >> 32;
+ if (h)
+ return fls(h) + 32;
+ return fls(x);
+}
+#elif BITS_PER_LONG == 64
+static __always_inline int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000..6bf1bba83
--- /dev/null
+++ b/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
new file mode 100644
index 000000000..188d3eba3
--- /dev/null
+++ b/include/asm-generic/bitops/le.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_LE_H_
+#define _ASM_GENERIC_BITOPS_LE_H_
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+
+#define BITOP_LE_SWIZZLE 0
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#ifndef find_next_zero_bit_le
+extern unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit_le
+extern unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_first_zero_bit_le
+#define find_first_zero_bit_le(addr, size) \
+ find_next_zero_bit_le((addr), (size), 0)
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+static inline int test_bit_le(int nr, const void *addr)
+{
+ return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void set_bit_le(int nr, void *addr)
+{
+ set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void clear_bit_le(int nr, void *addr)
+{
+ clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __set_bit_le(int nr, void *addr)
+{
+ __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void __clear_bit_le(int nr, void *addr)
+{
+ __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_set_bit_le(int nr, void *addr)
+{
+ return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int test_and_clear_bit_le(int nr, void *addr)
+{
+ return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_set_bit_le(int nr, void *addr)
+{
+ return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline int __test_and_clear_bit_le(int nr, void *addr)
+{
+ return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
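On big-endian machines, the BITOP_LE_SWIZZLE XOR above remaps a little-endian bit number onto the native in-word numbering: bit n of byte k in the little-endian view has to land on the native bit that lives in that same byte. A small userspace sketch, assuming a 64-bit word so the swizzle constant is 0x38, i.e. little-endian bit 0 maps to native bit 56 (both macro names below are invented):

/* Userspace sketch of the big-endian bit-number swizzle. */
#include <stdio.h>

#define BITS_PER_LONG_SKETCH    64
#define LE_SWIZZLE              ((BITS_PER_LONG_SKETCH - 1) & ~0x7)

int main(void)
{
        unsigned int nr;

        for (nr = 0; nr < 16; nr++)
                printf("le bit %2u -> native bit %2u\n", nr, nr ^ LE_SWIZZLE);
        return 0;
}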
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
new file mode 100644
index 000000000..3ae021368
--- /dev/null
+++ b/include/asm-generic/bitops/lock.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
+ */
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
+
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
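The acquire-on-test_and_set / release-on-clear pairing above is what turns a single bit into a lock. A hedged userspace sketch of the same pattern using C11 atomics rather than the kernel's atomic_long_*() helpers (the *_sketch names and LOCK_BIT are invented):

/* Userspace sketch of a bit lock built on acquire/release bit operations. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCK_BIT        0UL

static bool test_and_set_bit_lock_sketch(unsigned long nr,
                                         _Atomic unsigned long *p)
{
        unsigned long mask = 1UL << nr;
        unsigned long old = atomic_fetch_or_explicit(p, mask,
                                                     memory_order_acquire);
        return old & mask;              /* true: somebody else holds the bit */
}

static void clear_bit_unlock_sketch(unsigned long nr,
                                    _Atomic unsigned long *p)
{
        atomic_fetch_and_explicit(p, ~(1UL << nr), memory_order_release);
}

int main(void)
{
        _Atomic unsigned long word = 0;

        if (!test_and_set_bit_lock_sketch(LOCK_BIT, &word)) {
                /* critical section: the acquire pairs with the release */
                printf("lock taken, word = %lx\n", atomic_load(&word));
                clear_bit_unlock_sketch(LOCK_BIT, &word);
        }
        return 0;
}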
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 000000000..7e10c4b50
--- /dev/null
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
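All of the helpers above reduce a bit number to a word index plus an in-word mask via BIT_WORD() and BIT_MASK(), which are defined elsewhere in the kernel headers rather than in this file. A userspace sketch with stand-in macros, showing a bit landing in the second word of a multi-word bitmap (the *_SK names are invented):

/* Userspace stand-ins for BIT_MASK()/BIT_WORD() and the non-atomic ops. */
#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG_SK        (CHAR_BIT * sizeof(unsigned long))
#define BIT_MASK_SK(nr)         (1UL << ((nr) % BITS_PER_LONG_SK))
#define BIT_WORD_SK(nr)         ((nr) / BITS_PER_LONG_SK)

static void set_bit_sketch(unsigned int nr, unsigned long *addr)
{
        addr[BIT_WORD_SK(nr)] |= BIT_MASK_SK(nr);   /* plain RMW, no atomicity */
}

static int test_bit_sketch(unsigned int nr, const unsigned long *addr)
{
        return 1UL & (addr[BIT_WORD_SK(nr)] >> (nr % BITS_PER_LONG_SK));
}

int main(void)
{
        unsigned long bitmap[4] = { 0 };        /* 4 * BITS_PER_LONG bits */

        set_bit_sketch(0, bitmap);
        set_bit_sketch(BITS_PER_LONG_SK + 3, bitmap);   /* lands in word 1 */
        assert(test_bit_sketch(0, bitmap));
        assert(test_bit_sketch(BITS_PER_LONG_SK + 3, bitmap));
        assert(!test_bit_sketch(1, bitmap));
        return 0;
}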
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
new file mode 100644
index 000000000..86470cfce
--- /dev/null
+++ b/include/asm-generic/bitops/sched.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h> /* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+ if (b[0])
+ return __ffs(b[0]);
+ return __ffs(b[1]) + 64;
+#elif BITS_PER_LONG == 32
+ if (b[0])
+ return __ffs(b[0]);
+ if (b[1])
+ return __ffs(b[1]) + 32;
+ if (b[2])
+ return __ffs(b[2]) + 64;
+ return __ffs(b[3]) + 96;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
new file mode 100644
index 000000000..3905c1c93
--- /dev/null
+++ b/include/asm-generic/bitsperlong.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+#include <uapi/asm-generic/bitsperlong.h>
+
+
+#ifdef CONFIG_64BIT
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif /* CONFIG_64BIT */
+
+/*
+ * FIXME: The check currently breaks x86-64 build, so it's
+ * temporarily disabled. Please fix x86-64 and reenable
+ */
+#if 0 && BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
new file mode 100644
index 000000000..d4fb510a4
--- /dev/null
+++ b/include/asm-generic/bug.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+
+#define CUT_HERE "------------[ cut here ]------------\n"
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUGFLAG_WARNING (1 << 0)
+#define BUGFLAG_ONCE (1 << 1)
+#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_TAINT(taint) ((taint) << 8)
+#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
+#ifdef CONFIG_BUG
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ unsigned long bug_addr;
+#else
+ signed int bug_addr_disp;
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ const char *file;
+#else
+ signed int file_disp;
+#endif
+ unsigned short line;
+#endif
+ unsigned short flags;
+};
+#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Don't use BUG() or BUG_ON() unless there's really no way out; one
+ * example might be detecting data structure corruption in the middle
+ * of an operation that can't be backed out of. If the (sub)system
+ * can somehow continue operating, perhaps with reduced functionality,
+ * it's probably not BUG-worthy.
+ *
+ * If you're tempted to BUG(), think again: is completely giving up
+ * really the *only* solution? There are usually better options, where
+ * users don't need to reboot ASAP and can mostly shut down cleanly.
+ */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
+ panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+#endif
+
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_ONCE_TAINT(TAINT_WARN); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+/*
+ * WARN(), WARN_ON(), WARN_ON_ONCE(), and so on can be used to report
+ * significant kernel issues that need prompt attention if they should ever
+ * appear at runtime.
+ *
+ * Do not use these macros when checking for invalid external inputs
+ * (e.g. invalid system call arguments, or invalid data coming from
+ * network/devices), or for transient conditions like ENOMEM or EAGAIN.
+ * These macros should be used for recoverable kernel issues only.
+ * For invalid external inputs, transient conditions, etc use
+ * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
+ * Do not include "BUG"/"WARNING" in format strings manually to make these
+ * conditions distinguishable from kernel issues.
+ *
+ * Use the versions with printk format strings to provide better diagnostics.
+ */
+#ifndef __WARN_TAINT
+extern __printf(3, 4)
+void warn_slowpath_fmt(const char *file, const int line,
+ const char *fmt, ...);
+extern __printf(4, 5)
+void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+extern void warn_slowpath_null(const char *file, const int line);
+#define WANT_WARN_ON_SLOWPATH
+#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
+#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
+#define __WARN_printf_taint(taint, arg...) \
+ warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+#else
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+#define __WARN() do { \
+ printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
+} while (0)
+#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
+#define __WARN_printf_taint(taint, arg...) \
+ do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#endif
+
+/* used internally by panic.c */
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_TAINT(condition, taint, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf_taint(taint, format); \
+ unlikely(__ret_warn_on); \
+})
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) ({ \
+ static bool __section(.data.once) __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#endif
+
+#define WARN_ONCE(condition, format...) ({ \
+ static bool __section(.data.once) __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN(1, format); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
+#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
+ static bool __section(.data.once) __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ WARN_TAINT(1, taint, format); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do {} while (1)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) BUG(); } while (0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ no_printk(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#define WARN_ON_ONCE(condition) WARN_ON(condition)
+#define WARN_ONCE(condition, format...) WARN(condition, format)
+#define WARN_TAINT(condition, taint, format...) WARN(condition, format)
+#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format)
+
+#endif
+
+/*
+ * WARN_ON_SMP() is for cases where the warning is either
+ * meaningless for !SMP or may even cause failures.
+ * It is usually used for checks like WARN_ON(!spin_is_locked(&lock)),
+ * as spin_is_locked() returns 0 for uniprocessor settings.
+ * It can also be used with values that are only defined
+ * on SMP:
+ *
+ * struct foo {
+ * [...]
+ * #ifdef CONFIG_SMP
+ * int bar;
+ * #endif
+ * };
+ *
+ * void func(struct foo *zoot)
+ * {
+ * WARN_ON_SMP(!zoot->bar);
+ * }
+ *
+ * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(),
+ * and should be a nop and return false for uniprocessor.
+ *
+ * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set
+ * and x is true.
+ */
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x) WARN_ON(x)
+#else
+/*
+ * Use of ({0;}) because WARN_ON_SMP(x) may be used either as
+ * a stand alone line statement or as a condition in an if ()
+ * statement.
+ * A simple "0" would cause gcc to give a "statement has no effect"
+ * warning.
+ */
+# define WARN_ON_SMP(x) ({0;})
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
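The *_ONCE variants above all follow the same shape: a function-local static flag in a once-only section guards the warning so it fires on the first offending call and stays silent afterwards, while the condition result is still returned every time. A userspace sketch of that shape, using the same GCC statement-expression style as the kernel macros and fprintf() in place of the real warn path (WARN_ONCE_SKETCH is an invented name):

/* Userspace sketch of the "warn only once" pattern used by WARN_ONCE(). */
#include <stdbool.h>
#include <stdio.h>

#define WARN_ONCE_SKETCH(cond, fmt, ...) ({                             \
        static bool __warned;                                           \
        bool __ret = (cond);                                            \
        if (__ret && !__warned) {                                       \
                __warned = true;                                        \
                fprintf(stderr, "WARNING: " fmt "\n", ##__VA_ARGS__);   \
        }                                                               \
        __ret;                                                          \
})

static int parse(int value)
{
        if (WARN_ONCE_SKETCH(value < 0, "negative value %d", value))
                return -1;                      /* recover and carry on */
        return value;
}

int main(void)
{
        parse(-1);      /* prints the warning */
        parse(-2);      /* condition still detected, but stays silent */
        return 0;
}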
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
new file mode 100644
index 000000000..69021830f
--- /dev/null
+++ b/include/asm-generic/bugs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_BUGS_H
+#define __ASM_GENERIC_BUGS_H
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
new file mode 100644
index 000000000..60386e164
--- /dev/null
+++ b/include/asm-generic/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CACHE_H
+#define __ASM_GENERIC_CACHE_H
+/*
+ * 32 bytes appears to be the most common cache line size,
+ * so make that the default here. Architectures with larger
+ * cache lines need to provide their own cache.h.
+ */
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif /* __ASM_GENERIC_CACHE_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 000000000..0dd47a6db
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change,
+ * as the cache is mapped to physical memory rather than virtual memory.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
new file mode 100644
index 000000000..34785c0f5
--- /dev/null
+++ b/include/asm-generic/checksum.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CHECKSUM_H
+#define __ASM_GENERIC_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+#ifndef csum_partial_copy_nocheck
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+#endif
+
+#ifndef ip_fast_csum
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+
+#ifndef csum_fold
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+#endif
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* __ASM_GENERIC_CHECKSUM_H */
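csum_fold() above collapses the running 32-bit ones'-complement sum into 16 bits by adding the high half into the low half twice, so any carry out of the first addition is absorbed by the second, then complements the result. A userspace sketch with a couple of hand-checked values (csum_fold_sketch is an invented name):

/* Userspace sketch of csum_fold(): fold a 32-bit partial sum to 16 bits. */
#include <assert.h>
#include <stdint.h>

static uint16_t csum_fold_sketch(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* add high half into low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb a possible carry */
        return (uint16_t)~sum;
}

int main(void)
{
        /* 0x3c4d + 0x1a2b = 0x5678, complemented gives 0xa987 */
        assert(csum_fold_sketch(0x1a2b3c4d) == 0xa987);
        /* a sum whose first fold itself carries: 0x0001 + 0xffff = 0x10000 */
        assert(csum_fold_sketch(0xffff0001) == (uint16_t)~0x0001);
        return 0;
}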
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 000000000..f17f14f84
--- /dev/null
+++ b/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
+ __noreturn;
+
+/*
+ * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
+ * long parameter, supporting various types of architectures.
+ */
+static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long flags, prev;
+
+ /*
+ * Sanity checking, compile-time.
+ */
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ raw_local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ raw_local_irq_restore(flags);
+ return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes an u64 parameter.
+ */
+static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+ raw_local_irq_restore(flags);
+ return prev;
+}
+
+#endif
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
new file mode 100644
index 000000000..9a24510cd
--- /dev/null
+++ b/include/asm-generic/cmpxchg.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic UP xchg and cmpxchg using interrupt disablement. Does not
+ * support SMP.
+ */
+
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#ifndef xchg
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret, flags;
+
+ switch (size) {
+ case 1:
+#ifdef __xchg_u8
+ return __xchg_u8(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u8 *)ptr;
+ *(volatile u8 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u8 */
+
+ case 2:
+#ifdef __xchg_u16
+ return __xchg_u16(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u16 *)ptr;
+ *(volatile u16 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u16 */
+
+ case 4:
+#ifdef __xchg_u32
+ return __xchg_u32(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u32 *)ptr;
+ *(volatile u32 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+ case 8:
+#ifdef __xchg_u64
+ return __xchg_u64(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u64 *)ptr;
+ *(volatile u64 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#define xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+})
+
+#endif /* xchg */
+
+/*
+ * Atomic compare and exchange.
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#ifndef cmpxchg_local
+#define cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr)))); \
+})
+#endif
+
+#ifndef cmpxchg64_local
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
+#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+#endif /* __ASM_GENERIC_CMPXCHG_H */
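What callers typically build on cmpxchg() is a read / compute / compare-and-swap retry loop: recompute the new value from the freshly observed old one and retry until the swap lands. A hedged userspace sketch of that loop with C11 atomics instead of the UP-only macro above (add_capped is an invented example function):

/* Userspace sketch of the cmpxchg()-style retry loop (C11 atomics). */
#include <stdatomic.h>
#include <stdio.h>

/* Atomically add 'delta' but never let the counter exceed 'limit'. */
static int add_capped(_Atomic int *ctr, int delta, int limit)
{
        int old = atomic_load(ctr);
        int new;

        do {
                new = old + delta;
                if (new > limit)
                        return -1;      /* would exceed the cap, give up */
                /* on failure 'old' is reloaded with the current value */
        } while (!atomic_compare_exchange_weak(ctr, &old, new));
        return 0;
}

int main(void)
{
        _Atomic int counter = 8;

        add_capped(&counter, 1, 10);
        add_capped(&counter, 5, 10);    /* refused, would exceed the cap */
        printf("counter = %d\n", atomic_load(&counter));        /* 9 */
        return 0;
}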
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
new file mode 100644
index 000000000..28819451b
--- /dev/null
+++ b/include/asm-generic/compat.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* This is an empty stub for 32-bit-only architectures */
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
new file mode 100644
index 000000000..3a2e224b9
--- /dev/null
+++ b/include/asm-generic/current.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CURRENT_H
+#define __ASM_GENERIC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+
+#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
new file mode 100644
index 000000000..e448ac614
--- /dev/null
+++ b/include/asm-generic/delay.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DELAY_H
+#define __ASM_GENERIC_DELAY_H
+
+/* Deliberately undefined functions: referencing them causes a link-time error */
+extern void __bad_udelay(void);
+extern void __bad_ndelay(void);
+
+extern void __udelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
+extern void __const_udelay(unsigned long xloops);
+extern void __delay(unsigned long loops);
+
+/*
+ * The weird n/20000 thing suppresses a "comparison is always false due to
+ * limited range of data type" warning with non-const 8-bit arguments.
+ */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
+#define udelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_udelay(); \
+ else \
+ __const_udelay((n) * 0x10c7ul); \
+ } else { \
+ __udelay(n); \
+ } \
+ })
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
+#define ndelay(n) \
+ ({ \
+ if (__builtin_constant_p(n)) { \
+ if ((n) / 20000 >= 1) \
+ __bad_ndelay(); \
+ else \
+ __const_udelay((n) * 5ul); \
+ } else { \
+ __ndelay(n); \
+ } \
+ })
+
+#endif /* __ASM_GENERIC_DELAY_H */
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
new file mode 100644
index 000000000..d7c76bba6
--- /dev/null
+++ b/include/asm-generic/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_GENERIC_DEVICE_H
+#define _ASM_GENERIC_DEVICE_H
+
+struct dev_archdata {
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
new file mode 100644
index 000000000..dc9726fda
--- /dev/null
+++ b/include/asm-generic/div64.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * Optimization for constant divisors on 32-bit machines:
+ * Copyright (C) 2006-2015 Nicolas Pitre
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * uint32_t remainder = *n % base;
+ * *n = *n / base;
+ * return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+/**
+ * do_div - update the 64-bit dividend in place and return the remainder
+ * @n: pointer to uint64_t dividend (will be updated)
+ * @base: uint32_t divisor
+ *
+ * Summary:
+ * ``uint32_t remainder = *n % base;``
+ * ``*n = *n / base;``
+ *
+ * Return: (uint32_t)remainder
+ *
+ * NOTE: macro parameter @n is evaluated multiple times,
+ * beware of side effects!
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+
+#elif BITS_PER_LONG == 32
+
+#include <linux/log2.h>
+
+/*
+ * If the divisor happens to be constant, we determine the appropriate
+ * inverse at compile time to turn the division into a few inline
+ * multiplications which ought to be much faster. And yet only if compiling
+ * with a sufficiently recent gcc version to perform proper 64-bit constant
+ * propagation.
+ *
+ * (It is unfortunate that gcc doesn't perform all this internally.)
+ */
+
+#ifndef __div64_const32_is_OK
+#define __div64_const32_is_OK (__GNUC__ >= 4)
+#endif
+
+#define __div64_const32(n, ___b) \
+({ \
+ /* \
+ * Multiplication by reciprocal of b: n / b = n * (p / b) / p \
+ * \
+ * We rely on the fact that most of this code gets optimized \
+ * away at compile time due to constant propagation and only \
+ * a few multiplication instructions should remain. \
+ * Hence this monstrous macro (static inline doesn't always \
+ * do the trick here). \
+ */ \
+ uint64_t ___res, ___x, ___t, ___m, ___n = (n); \
+ uint32_t ___p, ___bias; \
+ \
+ /* determine MSB of b */ \
+ ___p = 1 << ilog2(___b); \
+ \
+ /* compute m = ((p << 64) + b - 1) / b */ \
+ ___m = (~0ULL / ___b) * ___p; \
+ ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \
+ \
+ /* one less than the dividend with highest result */ \
+ ___x = ~0ULL / ___b * ___b - 1; \
+ \
+ /* test our ___m with res = m * x / (p << 64) */ \
+ ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
+ ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
+ ___res += (___x & 0xffffffff) * (___m >> 32); \
+ ___t = (___res < ___t) ? (1ULL << 32) : 0; \
+ ___res = (___res >> 32) + ___t; \
+ ___res += (___m >> 32) * (___x >> 32); \
+ ___res /= ___p; \
+ \
+ /* Now sanitize and optimize what we've got. */ \
+ if (~0ULL % (___b / (___b & -___b)) == 0) { \
+ /* special case, can be simplified to ... */ \
+ ___n /= (___b & -___b); \
+ ___m = ~0ULL / (___b / (___b & -___b)); \
+ ___p = 1; \
+ ___bias = 1; \
+ } else if (___res != ___x / ___b) { \
+ /* \
+ * We can't get away without a bias to compensate \
+ * for bit truncation errors. To avoid it we'd need an \
+ * additional bit to represent m which would overflow \
+ * a 64-bit variable. \
+ * \
+ * Instead we do m = p / b and n / b = (n * m + m) / p. \
+ */ \
+ ___bias = 1; \
+ /* Compute m = (p << 64) / b */ \
+ ___m = (~0ULL / ___b) * ___p; \
+ ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
+ } else { \
+ /* \
+ * Reduce m / p, and try to clear bit 31 of m when \
+ * possible, otherwise that'll need extra overflow \
+ * handling later. \
+ */ \
+ uint32_t ___bits = -(___m & -___m); \
+ ___bits |= ___m >> 32; \
+ ___bits = (~___bits) << 1; \
+ /* \
+ * If ___bits == 0 then setting bit 31 is unavoidable. \
+ * Simply apply the maximum possible reduction in that \
+ * case. Otherwise the MSB of ___bits indicates the \
+ * best reduction we should apply. \
+ */ \
+ if (!___bits) { \
+ ___p /= (___m & -___m); \
+ ___m /= (___m & -___m); \
+ } else { \
+ ___p >>= ilog2(___bits); \
+ ___m >>= ilog2(___bits); \
+ } \
+ /* No bias needed. */ \
+ ___bias = 0; \
+ } \
+ \
+ /* \
+ * Now we have a combination of 2 conditions: \
+ * \
+ * 1) whether or not we need to apply a bias, and \
+ * \
+ * 2) whether or not there might be an overflow in the cross \
+ * product determined by (___m & ((1 << 63) | (1 << 31))). \
+ * \
+ * Select the best way to do (m_bias + m * n) / (1 << 64). \
+ * From now on there will be actual runtime code generated. \
+ */ \
+ ___res = __arch_xprod_64(___m, ___n, ___bias); \
+ \
+ ___res /= ___p; \
+})
+
+#ifndef __arch_xprod_64
+/*
+ * Default C implementation for __arch_xprod_64()
+ *
+ * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+ * Semantic: retval = ((bias ? m : 0) + m * n) >> 64
+ *
+ * The product is a 128-bit value, scaled down to 64 bits.
+ * Assuming constant propagation to optimize away unused conditional code.
+ * Architectures may provide their own optimized assembly implementation.
+ */
+static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+{
+ uint32_t m_lo = m;
+ uint32_t m_hi = m >> 32;
+ uint32_t n_lo = n;
+ uint32_t n_hi = n >> 32;
+ uint64_t res, tmp;
+
+ if (!bias) {
+ res = ((uint64_t)m_lo * n_lo) >> 32;
+ } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ /* there can't be any overflow here */
+ res = (m + (uint64_t)m_lo * n_lo) >> 32;
+ } else {
+ res = m + (uint64_t)m_lo * n_lo;
+ tmp = (res < m) ? (1ULL << 32) : 0;
+ res = (res >> 32) + tmp;
+ }
+
+ if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
+ /* there can't be any overflow here */
+ res += (uint64_t)m_lo * n_hi;
+ res += (uint64_t)m_hi * n_lo;
+ res >>= 32;
+ } else {
+ tmp = res += (uint64_t)m_lo * n_hi;
+ res += (uint64_t)m_hi * n_lo;
+ tmp = (res < tmp) ? (1ULL << 32) : 0;
+ res = (res >> 32) + tmp;
+ }
+
+ res += (uint64_t)m_hi * n_hi;
+
+ return res;
+}
+#endif
+
+#ifndef __div64_32
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+#endif
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
+ if (__builtin_constant_p(__base) && \
+ is_power_of_2(__base)) { \
+ __rem = (n) & (__base - 1); \
+ (n) >>= ilog2(__base); \
+ } else if (__div64_const32_is_OK && \
+ __builtin_constant_p(__base) && \
+ __base != 0) { \
+ uint32_t __res_lo, __n_lo = (n); \
+ (n) = __div64_const32(n, __base); \
+ /* the remainder can be computed with 32-bit regs */ \
+ __res_lo = (n); \
+ __rem = __n_lo - __res_lo * __base; \
+ } else if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = __div64_32(&(n), __base); \
+ __rem; \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
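The point the NOTE above keeps making is that do_div() looks like a function but behaves like an assignment: the 64-bit dividend is truncated to the quotient in place, and only the 32-bit remainder comes back as the macro's value. A userspace sketch of the same semantics and the typical nanoseconds-to-seconds split (do_div_sketch is an invented helper, not the kernel macro):

/* Userspace sketch of do_div() semantics: n /= base, remainder returned. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t ns = 12345678901ULL;           /* nanoseconds */
        uint32_t rem = do_div_sketch(&ns, 1000000000u);

        /* the dividend now holds the quotient, as with the kernel macro */
        printf("%llu s, %u ns\n", (unsigned long long)ns, (unsigned int)rem);
        assert(ns == 12 && rem == 345678901u);
        return 0;
}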
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 000000000..f24b0f9a4
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
+#define _ASM_GENERIC_DMA_CONTIGUOUS_H
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000..ad2868263
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ /*
+ * Use the non-coherent ops if available. If an architecture wants a
+ * more fine-grained selection of operations it will have to implement
+ * get_arch_dma_ops itself or use the per-device dma_ops.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT_OPS
+ return &dma_noncoherent_ops;
+#else
+ return &dma_direct_ops;
+#endif
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h
new file mode 100644
index 000000000..43d0c8af8
--- /dev/null
+++ b/include/asm-generic/dma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_DMA_H
+#define __ASM_GENERIC_DMA_H
+/*
+ * This file traditionally describes the i8237 PC style DMA controller.
+ * Most architectures don't have these any more and can get the minimal
+ * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
+ *
+ * Some code relies on seeing MAX_DMA_ADDRESS though.
+ */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __ASM_GENERIC_DMA_H */
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 000000000..9def22e6e
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap_prot(resource_size_t phys_addr,
+ unsigned long size, unsigned long prot_val);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
new file mode 100644
index 000000000..445de38b7
--- /dev/null
+++ b/include/asm-generic/emergency-restart.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+#define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+static inline void machine_emergency_restart(void)
+{
+ machine_restart(NULL);
+}
+
+#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000..296c65442
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+ EI_ETYPE_NONE, /* Dummy value for undefined case */
+ EI_ETYPE_NULL, /* Return NULL if failure */
+ EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
+ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
+};
+
+struct error_injection_entry {
+ unsigned long addr;
+ int etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Use this macro to specify functions
+ * into which errors can be injected.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype) \
+static struct error_injection_entry __used \
+ __attribute__((__section__("_error_injection_whitelist"))) \
+ _eil_addr_##fname = { \
+ .addr = (unsigned long)fname, \
+ .etype = EI_ETYPE_##_etype, \
+ };
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
new file mode 100644
index 000000000..32c0a216f
--- /dev/null
+++ b/include/asm-generic/exec.h
@@ -0,0 +1,19 @@
+/* Generic process execution definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_EXEC_H
+#define __ASM_GENERIC_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_GENERIC_EXEC_H */
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000..4d73e6e3c
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,93 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#else
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+
+.macro __put, val, name
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ .long \val - ., \name - .
+#elif defined(CONFIG_64BIT)
+ .quad \val, \name
+#else
+ .long \val, \name
+#endif
+.endm
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+ .globl __ksymtab_\name
+ .section ___ksymtab\sec+\name,"a"
+ .balign KSYM_ALIGN
+__ksymtab_\name:
+ __put \val, __kstrtab_\name
+ .previous
+ .section __ksymtab_strings,"a"
+__kstrtab_\name:
+ .asciz "\name"
+ .previous
+#ifdef CONFIG_MODVERSIONS
+ .section ___kcrctab\sec+\name,"a"
+ .balign KCRC_ALIGN
+__kcrctab_\name:
+#if defined(CONFIG_MODULE_REL_CRCS)
+ .long __crc_\name - .
+#else
+ .long __crc_\name
+#endif
+ .weak __crc_\name
+ .previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec) \
+ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf) \
+ ___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled) \
+ __cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
+#define EXPORT_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
+#define EXPORT_DATA_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, name,)
+#define EXPORT_DATA_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, name,_gpl)
+
+#endif
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 000000000..f9618bd07
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
new file mode 100644
index 000000000..f9f18101e
--- /dev/null
+++ b/include/asm-generic/fb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_FB_H_
+#define __ASM_GENERIC_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 000000000..827e4d3bb
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,103 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long ________addr; \
+ __set_fixmap(idx, phys, flags); \
+ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ ________addr; \
+})
+
+#define set_fixmap_offset(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#define set_fixmap_offset_io(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
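
For orientation, a hedged sketch of what an architecture supplies before pulling in this generic header; the enum entries, the FIXADDR_TOP constant and the early-console usage are invented for illustration, and __set_fixmap() must be implemented in the architecture's mm code:

/* illustrative <asm/fixmap.h> for a made-up architecture */
#ifndef _DEMO_ASM_FIXMAP_H
#define _DEMO_ASM_FIXMAP_H

#include <asm/page.h>
#include <asm/pgtable.h>

enum fixed_addresses {
	FIX_HOLE,
	FIX_EARLYCON_MEM_BASE,
	__end_of_fixed_addresses
};

#define FIXADDR_TOP	0xfffff000UL	/* example constant */
#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);

#include <asm-generic/fixmap.h>

#endif

/* early console code can then map a UART page, uncached, at a fixed slot:
 *	base = (void __iomem *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
 *							 uart_phys);
 */
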
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 000000000..51abba9ea
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-generic/ftrace.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h, the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
new file mode 100644
index 000000000..8666fe7f3
--- /dev/null
+++ b/include/asm-generic/futex.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FUTEX_H
+#define _ASM_GENERIC_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#ifndef CONFIG_SMP
+/*
+ * The following implementation is only for uniprocessor machines.
+ * It relies on preempt_disable() ensuring mutual exclusion.
+ */
+
+/**
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic/bitwise operation on the
+ *				    futex word at @uaddr, returning the
+ *				    previous value
+ *
+ * @op:	operation to execute (FUTEX_OP_SET, ADD, OR, ANDN or XOR)
+ * @oparg:	argument to the operation
+ * @oval:	location where the previous futex value is stored on success
+ * @uaddr:	pointer to user space address
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
+ */
+static inline int
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval, ret;
+ u32 tmp;
+
+ preempt_disable();
+ pagefault_disable();
+
+ ret = -EFAULT;
+ if (unlikely(get_user(oldval, uaddr) != 0))
+ goto out_pagefault_enable;
+
+ ret = 0;
+ tmp = oldval;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ tmp = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ tmp += oparg;
+ break;
+ case FUTEX_OP_OR:
+ tmp |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ tmp &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ tmp ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ ret = -EFAULT;
+
+out_pagefault_enable:
+ pagefault_enable();
+ preempt_enable();
+
+ if (ret == 0)
+ *oval = oldval;
+
+ return ret;
+}
+
+/**
+ * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * uaddr with newval if the current value is
+ * oldval.
+ * @uval: pointer to store content of @uaddr
+ * @uaddr: pointer to user space address
+ * @oldval: old value
+ * @newval: new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ u32 val;
+
+ preempt_disable();
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
+ return -EFAULT;
+ }
+
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
+ return -EFAULT;
+ }
+
+ *uval = val;
+ preempt_enable();
+
+ return 0;
+}
+
+#else
+static inline int
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ case FUTEX_OP_ADD:
+ case FUTEX_OP_OR:
+ case FUTEX_OP_ANDN:
+ case FUTEX_OP_XOR:
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_SMP */
+#endif
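
As a hedged illustration of the contract above (the wrapper name is invented), FUTEX_OP_ADD with oparg == 1 behaves like an atomic fetch-and-add on the user-space futex word:

#include <linux/futex.h>
#include <asm/futex.h>

/* atomically does: *old = *uaddr; *uaddr += 1;
 * returns 0 on success, -EFAULT on a user fault, -ENOSYS if unsupported */
static int demo_futex_fetch_add_one(u32 __user *uaddr, int *old)
{
	return arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, old, uaddr);
}
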
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 000000000..e9f20b813
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+/**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+ *
+ * Determine the allocation order of a particular sized block of memory. This
+ * is on a logarithmic scale, where:
+ *
+ * 0 -> 2^0 * PAGE_SIZE and below
+ * 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
+ * 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
+ * 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
+ * 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
+ * ...
+ *
+ * The order returned is used to find the smallest allocation granule required
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+ */
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_GETORDER_H */
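
A short worked example of the mapping documented above, assuming the common PAGE_SIZE of 4096 bytes (PAGE_SHIFT == 12); the helper name is invented:

#include <linux/gfp.h>

/* with PAGE_SIZE == 4096:
 *	get_order(1)    == 0	(one page)
 *	get_order(4096) == 0
 *	get_order(4097) == 1	(two pages)
 *	get_order(8193) == 2	(four pages)
 */
static struct page *demo_alloc_16k(void)
{
	/* round an arbitrary size up to a whole power-of-two page order */
	return alloc_pages(GFP_KERNEL, get_order(16384));	/* order 2 */
}
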
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 000000000..19eadac41
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/compiler.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+/* Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ *
+ * While the GPIO programming interface defines valid GPIO numbers
+ * to be in the range 0..MAX_INT, this library restricts them to the
+ * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
+ */
+
+#ifndef ARCH_NR_GPIOS
+#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
+#else
+#define ARCH_NR_GPIOS 512
+#endif
+#endif
+
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
+static inline bool gpio_is_valid(int number)
+{
+ return number >= 0 && number < ARCH_NR_GPIOS;
+}
+
+struct device;
+struct gpio;
+struct seq_file;
+struct module;
+struct device_node;
+struct gpio_desc;
+
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+ return gpiod_to_chip(gpio_to_desc(gpio));
+}
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+extern int gpio_request(unsigned gpio, const char *label);
+extern void gpio_free(unsigned gpio);
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
+}
+
+static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
+}
+
+
+/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
+ * the GPIO is constant and refers to some always-present controller,
+ * giving direct access to chip registers and tight bitbanging loops.
+ */
+static inline int __gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void __gpio_set_value(unsigned gpio, int value)
+{
+ return gpiod_set_raw_value(gpio_to_desc(gpio), value);
+}
+
+static inline int __gpio_cansleep(unsigned gpio)
+{
+ return gpiod_cansleep(gpio_to_desc(gpio));
+}
+
+static inline int __gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
+
+extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+extern int gpio_request_array(const struct gpio *array, size_t num);
+extern void gpio_free_array(const struct gpio *array, size_t num);
+
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+}
+
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ return gpiod_export_link(dev, name, gpio_to_desc(gpio));
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+ gpiod_unexport(gpio_to_desc(gpio));
+}
+
+#else /* !CONFIG_GPIOLIB */
+
+static inline bool gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ might_sleep();
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ might_sleep();
+ __gpio_set_value(gpio, value);
+}
+
+#endif /* !CONFIG_GPIOLIB */
+
+#endif /* _ASM_GENERIC_GPIO_H */
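
A hedged sketch of the legacy integer-based API these wrappers preserve; the GPIO number and label are invented, and new code should normally use the gpiod_*() descriptor API directly:

#include <linux/gpio.h>		/* pulls in this header via <asm/gpio.h> */

static int demo_pulse_led(void)
{
	int err;

	err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "demo-led");
	if (err)
		return err;

	/* may sleep, e.g. when the line sits behind an I2C expander */
	gpio_set_value_cansleep(42, 1);
	gpio_set_value_cansleep(42, 0);

	gpio_free(42);
	return 0;
}
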
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
new file mode 100644
index 000000000..d14214dfc
--- /dev/null
+++ b/include/asm-generic/hardirq.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_HARDIRQ_H
+#define __ASM_GENERIC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#endif /* __ASM_GENERIC_HARDIRQ_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 000000000..9d0cde8ab
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline unsigned long huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline unsigned long huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+#ifndef huge_pte_clear
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_clear(mm, addr, ptep);
+}
+#endif
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h
new file mode 100644
index 000000000..89036d7b4
--- /dev/null
+++ b/include/asm-generic/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HW_IRQ_H
+#define __ASM_GENERIC_HW_IRQ_H
+/*
+ * hw_irq.h has internal declarations for the low-level interrupt
+ * controller, like the original i8259A.
+ * In general, this is not needed for new architectures.
+ */
+
+#endif /* __ASM_GENERIC_HW_IRQ_H */
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
new file mode 100644
index 000000000..81dfa3ee5
--- /dev/null
+++ b/include/asm-generic/ide_iops.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+ while (count--) {
+ writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
new file mode 100644
index 000000000..a248545f1
--- /dev/null
+++ b/include/asm-generic/int-ll64.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * asm-generic/int-ll64.h
+ *
+ * Integer declarations for architectures which use "long long"
+ * for 64-bit types.
+ */
+#ifndef _ASM_GENERIC_INT_LL64_H
+#define _ASM_GENERIC_INT_LL64_H
+
+#include <uapi/asm-generic/int-ll64.h>
+
+
+#ifndef __ASSEMBLY__
+
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
+#else /* __ASSEMBLY__ */
+
+#define S8_C(x) x
+#define U8_C(x) x
+#define S16_C(x) x
+#define U16_C(x) x
+#define S32_C(x) x
+#define U32_C(x) x
+#define S64_C(x) x
+#define U64_C(x) x
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_INT_LL64_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
new file mode 100644
index 000000000..d356f8029
--- /dev/null
+++ b/include/asm-generic/io.h
@@ -0,0 +1,1140 @@
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_IO_H
+#define __ASM_GENERIC_IO_H
+
+#include <asm/page.h> /* I/O is all done through memory accesses */
+#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
+#include <asm-generic/iomap.h>
+#endif
+
+#include <asm-generic/pci_iomap.h>
+
+#ifndef mmiowb
+#define mmiowb() do {} while (0)
+#endif
+
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() barrier()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par() __io_ar()
+#endif
+
+
+/*
+ * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
+ *
+ * On some architectures memory mapped IO needs to be accessed differently.
+ * On the simple architectures, we just read/write the memory location
+ * directly.
+ */
+
+#ifndef __raw_readb
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readw
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *)addr;
+}
+#endif
+
+#ifndef __raw_readl
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *)addr;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef __raw_readq
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *)addr;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef __raw_writeb
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writew
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 value, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *)addr = value;
+}
+#endif
+
+#ifndef __raw_writel
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 value, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *)addr = value;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef __raw_writeq
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *)addr = value;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}() access little endian memory and return result in
+ * native endianness.
+ */
+
+#ifndef readb
+#define readb readb
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar();
+ return val;
+}
+#endif
+
+#ifndef readw
+#define readw readw
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ __io_br();
+ val = __le16_to_cpu(__raw_readw(addr));
+ __io_ar();
+ return val;
+}
+#endif
+
+#ifndef readl
+#define readl readl
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ __io_br();
+ val = __le32_to_cpu(__raw_readl(addr));
+ __io_ar();
+ return val;
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef readq
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ __io_br();
+ val = __le64_to_cpu(__raw_readq(addr));
+ __io_ar();
+ return val;
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef writeb
+#define writeb writeb
+static inline void writeb(u8 value, volatile void __iomem *addr)
+{
+ __io_bw();
+ __raw_writeb(value, addr);
+ __io_aw();
+}
+#endif
+
+#ifndef writew
+#define writew writew
+static inline void writew(u16 value, volatile void __iomem *addr)
+{
+ __io_bw();
+ __raw_writew(cpu_to_le16(value), addr);
+ __io_aw();
+}
+#endif
+
+#ifndef writel
+#define writel writel
+static inline void writel(u32 value, volatile void __iomem *addr)
+{
+ __io_bw();
+ __raw_writel(__cpu_to_le32(value), addr);
+ __io_aw();
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef writeq
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+ __io_bw();
+ __raw_writeq(__cpu_to_le64(value), addr);
+ __io_aw();
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+/*
+ * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
+ * are not guaranteed to provide ordering against spinlocks or memory
+ * accesses.
+ */
+#ifndef readb_relaxed
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
+#endif
+
+#ifndef readw_relaxed
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
+#endif
+
+#ifndef readl_relaxed
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
+#endif
+
+#if defined(readq) && !defined(readq_relaxed)
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
+#endif
+
+#ifndef writeb_relaxed
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ __raw_writeb(value, addr);
+}
+#endif
+
+#ifndef writew_relaxed
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(value), addr);
+}
+#endif
+
+#ifndef writel_relaxed
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_le32(value), addr);
+}
+#endif
+
+#if defined(writeq) && !defined(writeq_relaxed)
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ __raw_writeq(__cpu_to_le64(value), addr);
+}
+#endif
+
+/*
+ * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
+ * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
+ */
+#ifndef readsb
+#define readsb readsb
+static inline void readsb(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+
+ do {
+ u8 x = __raw_readb(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsw
+#define readsw readsw
+static inline void readsw(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+
+ do {
+ u16 x = __raw_readw(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef readsl
+#define readsl readsl
+static inline void readsl(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+
+ do {
+ u32 x = __raw_readl(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef readsq
+#define readsq readsq
+static inline void readsq(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ u64 *buf = buffer;
+
+ do {
+ u64 x = __raw_readq(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef writesb
+#define writesb writesb
+static inline void writesb(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+
+ do {
+ __raw_writeb(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesw
+#define writesw writesw
+static inline void writesw(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+
+ do {
+ __raw_writew(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifndef writesl
+#define writesl writesl
+static inline void writesl(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+
+ do {
+ __raw_writel(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef writesq
+#define writesq writesq
+static inline void writesq(volatile void __iomem *addr, const void *buffer,
+ unsigned int count)
+{
+ if (count) {
+ const u64 *buf = buffer;
+
+ do {
+ __raw_writeq(*buf++, addr);
+ } while (--count);
+ }
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *)0)
+#endif
+
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
+
+#include <linux/logic_pio.h>
+
+/*
+ * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
+ * implemented on hardware that needs an additional delay for I/O accesses to
+ * take effect.
+ */
+
+#ifndef inb
+#define inb inb
+static inline u8 inb(unsigned long addr)
+{
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par();
+ return val;
+}
+#endif
+
+#ifndef inw
+#define inw inw
+static inline u16 inw(unsigned long addr)
+{
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ __io_par();
+ return val;
+}
+#endif
+
+#ifndef inl
+#define inl inl
+static inline u32 inl(unsigned long addr)
+{
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ __io_par();
+ return val;
+}
+#endif
+
+#ifndef outb
+#define outb outb
+static inline void outb(u8 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#ifndef outw
+#define outw outw
+static inline void outw(u16 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#ifndef outl
+#define outl outl
+static inline void outl(u32 value, unsigned long addr)
+{
+ __io_pbw();
+ __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
+}
+#endif
+
+#ifndef inb_p
+#define inb_p inb_p
+static inline u8 inb_p(unsigned long addr)
+{
+ return inb(addr);
+}
+#endif
+
+#ifndef inw_p
+#define inw_p inw_p
+static inline u16 inw_p(unsigned long addr)
+{
+ return inw(addr);
+}
+#endif
+
+#ifndef inl_p
+#define inl_p inl_p
+static inline u32 inl_p(unsigned long addr)
+{
+ return inl(addr);
+}
+#endif
+
+#ifndef outb_p
+#define outb_p outb_p
+static inline void outb_p(u8 value, unsigned long addr)
+{
+ outb(value, addr);
+}
+#endif
+
+#ifndef outw_p
+#define outw_p outw_p
+static inline void outw_p(u16 value, unsigned long addr)
+{
+ outw(value, addr);
+}
+#endif
+
+#ifndef outl_p
+#define outl_p outl_p
+static inline void outl_p(u32 value, unsigned long addr)
+{
+ outl(value, addr);
+}
+#endif
+
+/*
+ * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
+ * single I/O port multiple times.
+ */
+
+#ifndef insb
+#define insb insb
+static inline void insb(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insw
+#define insw insw
+static inline void insw(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insl
+#define insl insl
+static inline void insl(unsigned long addr, void *buffer, unsigned int count)
+{
+ readsl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsb
+#define outsb outsb
+static inline void outsb(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsw
+#define outsw outsw
+static inline void outsw(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsl
+#define outsl outsl
+static inline void outsl(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ writesl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insb_p
+#define insb_p insb_p
+static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insb(addr, buffer, count);
+}
+#endif
+
+#ifndef insw_p
+#define insw_p insw_p
+static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insw(addr, buffer, count);
+}
+#endif
+
+#ifndef insl_p
+#define insl_p insl_p
+static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
+{
+ insl(addr, buffer, count);
+}
+#endif
+
+#ifndef outsb_p
+#define outsb_p outsb_p
+static inline void outsb_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsb(addr, buffer, count);
+}
+#endif
+
+#ifndef outsw_p
+#define outsw_p outsw_p
+static inline void outsw_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsw(addr, buffer, count);
+}
+#endif
+
+#ifndef outsl_p
+#define outsl_p outsl_p
+static inline void outsl_p(unsigned long addr, const void *buffer,
+ unsigned int count)
+{
+ outsl(addr, buffer, count);
+}
+#endif
+
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioread8
+#define ioread8 ioread8
+static inline u8 ioread8(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+#endif
+
+#ifndef ioread16
+#define ioread16 ioread16
+static inline u16 ioread16(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+#endif
+
+#ifndef ioread32
+#define ioread32 ioread32
+static inline u32 ioread32(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64
+#define ioread64 ioread64
+static inline u64 ioread64(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite8
+#define iowrite8 iowrite8
+static inline void iowrite8(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+#endif
+
+#ifndef iowrite16
+#define iowrite16 iowrite16
+static inline void iowrite16(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+#endif
+
+#ifndef iowrite32
+#define iowrite32 iowrite32
+static inline void iowrite32(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64
+#define iowrite64 iowrite64
+static inline void iowrite64(u64 value, volatile void __iomem *addr)
+{
+ writeq(value, addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef ioread16be
+#define ioread16be ioread16be
+static inline u16 ioread16be(const volatile void __iomem *addr)
+{
+ return swab16(readw(addr));
+}
+#endif
+
+#ifndef ioread32be
+#define ioread32be ioread32be
+static inline u32 ioread32be(const volatile void __iomem *addr)
+{
+ return swab32(readl(addr));
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64be
+#define ioread64be ioread64be
+static inline u64 ioread64be(const volatile void __iomem *addr)
+{
+ return swab64(readq(addr));
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite16be
+#define iowrite16be iowrite16be
+static inline void iowrite16be(u16 value, volatile void __iomem *addr)
+{
+ writew(swab16(value), addr);
+}
+#endif
+
+#ifndef iowrite32be
+#define iowrite32be iowrite32be
+static inline void iowrite32be(u32 value, volatile void __iomem *addr)
+{
+ writel(swab32(value), addr);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64be
+#define iowrite64be iowrite64be
+static inline void iowrite64be(u64 value, volatile void __iomem *addr)
+{
+ writeq(swab64(value), addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef ioread8_rep
+#define ioread8_rep ioread8_rep
+static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
+ unsigned int count)
+{
+ readsb(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread16_rep
+#define ioread16_rep ioread16_rep
+static inline void ioread16_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsw(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread32_rep
+#define ioread32_rep ioread32_rep
+static inline void ioread32_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsl(addr, buffer, count);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64_rep
+#define ioread64_rep ioread64_rep
+static inline void ioread64_rep(const volatile void __iomem *addr,
+ void *buffer, unsigned int count)
+{
+ readsq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
+#ifndef iowrite8_rep
+#define iowrite8_rep iowrite8_rep
+static inline void iowrite8_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesb(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite16_rep
+#define iowrite16_rep iowrite16_rep
+static inline void iowrite16_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesw(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite32_rep
+#define iowrite32_rep iowrite32_rep
+static inline void iowrite32_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesl(addr, buffer, count);
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64_rep
+#define iowrite64_rep iowrite64_rep
+static inline void iowrite64_rep(volatile void __iomem *addr,
+ const void *buffer,
+ unsigned int count)
+{
+ writesq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_GENERIC_IOMAP */
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+#define __io_virt(x) ((void __force *)(x))
+
+#ifndef CONFIG_GENERIC_IOMAP
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+
+#ifndef pci_iounmap
+#define pci_iounmap pci_iounmap
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+}
+#endif
+#endif /* CONFIG_GENERIC_IOMAP */
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are pretty trivial
+ */
+#ifndef virt_to_phys
+#define virt_to_phys virt_to_phys
+static inline unsigned long virt_to_phys(volatile void *address)
+{
+ return __pa((unsigned long)address);
+}
+#endif
+
+#ifndef phys_to_virt
+#define phys_to_virt phys_to_virt
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+#endif
+
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * If you have an IOMMU your architecture is expected to have both ioremap()
+ * and iounmap() implemented otherwise the asm-generic helpers will provide a
+ * direct mapping.
+ *
+ * There are also ioremap_*() call variants. If you have no IOMMU, they all
+ * default to direct mapping as well, and you can override these defaults.
+ * If you have an IOMMU you are highly encouraged to provide your own
+ * ioremap variant implementations, as there is currently no safe
+ * architecture-agnostic default. To avoid possible improper behaviour, the
+ * default asm-generic ioremap_*() variants all return NULL when an IOMMU is
+ * available. If you've defined your own ioremap_*() variant you must then
+ * #define it to itself to avoid the default NULL return.
+ */
+
+#ifdef CONFIG_MMU
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+#else /* !CONFIG_MMU */
+
+/*
+ * Change "struct page" to physical address.
+ *
+ * This implementation is for the no-MMU case only... if you have an MMU
+ * you'll need to provide your own definitions.
+ */
+
+#ifndef ioremap
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ return (void __iomem *)(unsigned long)offset;
+}
+#endif
+
+#ifndef __ioremap
+#define __ioremap __ioremap
+static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
+ unsigned long flags)
+{
+ return ioremap(offset, size);
+}
+#endif
+
+#ifndef iounmap
+#define iounmap iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+}
+#endif
+#endif /* CONFIG_MMU */
+#ifndef ioremap_nocache
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
+#define ioremap_nocache ioremap_nocache
+static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
+{
+ return ioremap(offset, size);
+}
+#endif
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
+#ifndef ioremap_wc
+#define ioremap_wc ioremap_wc
+static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioport_map
+#define ioport_map ioport_map
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ port &= IO_SPACE_LIMIT;
+ return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
+}
+#endif
+
+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+#endif
+#else /* CONFIG_GENERIC_IOMAP */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *p);
+#endif /* CONFIG_GENERIC_IOMAP */
+#endif /* CONFIG_HAS_IOPORT_MAP */
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#ifndef xlate_dev_kmem_ptr
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+static inline void *xlate_dev_kmem_ptr(void *addr)
+{
+ return addr;
+}
+#endif
+
+#ifndef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
+{
+ return __va(addr);
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
+#endif
+
+#ifdef CONFIG_VIRT_TO_BUS
+#ifndef virt_to_bus
+static inline unsigned long virt_to_bus(void *address)
+{
+ return (unsigned long)address;
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+ return (void *)address;
+}
+#endif
+#endif
+
+#ifndef memset_io
+#define memset_io memset_io
+/**
+ * memset_io - Set a range of I/O memory to a constant value
+ * @addr: The beginning of the I/O-memory range to set
+ * @value: The value to set the memory to
+ * @size: The number of bytes to set
+ *
+ * Set a range of I/O memory to a given value.
+ */
+static inline void memset_io(volatile void __iomem *addr, int value,
+ size_t size)
+{
+ memset(__io_virt(addr), value, size);
+}
+#endif
+
+#ifndef memcpy_fromio
+#define memcpy_fromio memcpy_fromio
+/**
+ * memcpy_fromio - Copy a block of data from I/O memory
+ * @buffer: The (RAM) destination for the copy
+ * @addr: The (I/O memory) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data from I/O memory.
+ */
+static inline void memcpy_fromio(void *buffer,
+ const volatile void __iomem *addr,
+ size_t size)
+{
+ memcpy(buffer, __io_virt(addr), size);
+}
+#endif
+
+#ifndef memcpy_toio
+#define memcpy_toio memcpy_toio
+/**
+ * memcpy_toio - Copy a block of data into I/O memory
+ * @addr: The (I/O memory) destination for the copy
+ * @buffer: The (RAM) source for the data
+ * @size: The number of bytes to copy
+ *
+ * Copy a block of data to I/O memory.
+ */
+static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ memcpy(__io_virt(addr), buffer, size);
+}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_GENERIC_IO_H */
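
A hedged example of how a driver consumes the accessors this header provides; the device, register offsets, and function name are invented:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_REG_CTRL	0x00	/* illustrative register offsets */
#define DEMO_REG_STAT	0x04

static int demo_probe_mmio(phys_addr_t phys, size_t len)
{
	void __iomem *base = ioremap_nocache(phys, len);
	u32 stat;

	if (!base)
		return -ENOMEM;

	writel(0x1, base + DEMO_REG_CTRL);	/* little-endian MMIO write */
	stat = readl(base + DEMO_REG_STAT);	/* ordered after the write */

	iounmap(base);
	return stat ? 0 : -EIO;
}
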
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
new file mode 100644
index 000000000..9fda9ed00
--- /dev/null
+++ b/include/asm-generic/ioctl.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+#include <uapi/asm-generic/ioctl.h>
+
+#ifdef __CHECKER__
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#else
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+ sizeof(t) : __invalid_size_argument_for_IOC)
+#endif
+
+#endif /* _ASM_GENERIC_IOCTL_H */
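
For illustration of what _IOC_TYPECHECK buys (the magic number and structure are invented):

#include <linux/ioctl.h>
#include <linux/types.h>

struct demo_params {
	__u32 rate;
	__u32 flags;
};

#define DEMO_IOC_MAGIC		'D'
#define DEMO_IOC_SET_PARAMS	_IOW(DEMO_IOC_MAGIC, 1, struct demo_params)
#define DEMO_IOC_GET_PARAMS	_IOR(DEMO_IOC_MAGIC, 2, struct demo_params)

/*
 * Passing a size instead of a type, e.g. _IOW(DEMO_IOC_MAGIC, 3, sizeof(int)),
 * breaks the sizeof(t[1]) check at compile time; a type larger than
 * (1 << _IOC_SIZEBITS) bytes references the undefined
 * __invalid_size_argument_for_IOC and fails the build. Under sparse
 * (__CHECKER__) the macro degrades to a plain sizeof().
 */
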
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
new file mode 100644
index 000000000..5b63b94ef
--- /dev/null
+++ b/include/asm-generic/iomap.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+#include <asm/byteorder.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or a MMIO access, these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int ioread8(void __iomem *);
+extern unsigned int ioread16(void __iomem *);
+extern unsigned int ioread16be(void __iomem *);
+extern unsigned int ioread32(void __iomem *);
+extern unsigned int ioread32be(void __iomem *);
+#ifdef CONFIG_64BIT
+extern u64 ioread64(void __iomem *);
+extern u64 ioread64be(void __iomem *);
+#endif
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
+#ifdef CONFIG_64BIT
+extern void iowrite64(u64, void __iomem *);
+extern void iowrite64be(u64, void __iomem *);
+#endif
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+#endif
+
+#ifndef ARCH_HAS_IOREMAP_WC
+#define ioremap_wc ioremap_nocache
+#endif
+
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
+#ifdef CONFIG_PCI
+/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+#elif defined(CONFIG_GENERIC_IOMAP)
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
+#endif
+
+#include <asm-generic/pci_iomap.h>
+
+#endif
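
A hedged sketch of the intended cookie-based use in a PCI driver; the BAR index, offsets, and function name are invented:

#include <linux/errno.h>
#include <linux/pci.h>

static int demo_read_id(struct pci_dev *pdev, u32 *id)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);	/* map BAR 0, full length */
	if (!regs)
		return -ENOMEM;

	/* the same calls work whether BAR 0 is a memory or an I/O BAR */
	iowrite32(0, regs + 0x10);
	*id = ioread32(regs + 0x00);

	pci_iounmap(pdev, regs);
	return 0;
}
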
diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h
new file mode 100644
index 000000000..da21de991
--- /dev/null
+++ b/include/asm-generic/irq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_IRQ_H
+#define __ASM_GENERIC_IRQ_H
+
+/*
+ * NR_IRQS is the upper bound of how many interrupts can be handled
+ * in the platform. It is used to size the static irq_map array,
+ * so don't make it too big.
+ */
+#ifndef NR_IRQS
+#define NR_IRQS 64
+#endif
+
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
+#endif /* __ASM_GENERIC_IRQ_H */
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
new file mode 100644
index 000000000..6bf9355fa
--- /dev/null
+++ b/include/asm-generic/irq_regs.h
@@ -0,0 +1,37 @@
+/* Fallback per-CPU frame pointer holder
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_GENERIC_IRQ_REGS_H
+#define _ASM_GENERIC_IRQ_REGS_H
+
+#include <linux/percpu.h>
+
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack
+ */
+DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return __this_cpu_read(__irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = __this_cpu_read(__irq_regs);
+ __this_cpu_write(__irq_regs, new_regs);
+ return old_regs;
+}
+
+#endif /* _ASM_GENERIC_IRQ_REGS_H */
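
A hedged sketch of the usual call pattern in an architecture's interrupt entry path; the function name is invented and the dispatch step is elided:

#include <linux/hardirq.h>
#include <asm/irq_regs.h>

void demo_handle_arch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	/* ... dispatch to the generic IRQ layer here ... */
	irq_exit();

	set_irq_regs(old_regs);	/* restore, so nested interrupts unwind correctly */
}
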
diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h
new file mode 100644
index 000000000..d5dce06f7
--- /dev/null
+++ b/include/asm-generic/irq_work.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+
+#endif /* __ASM_IRQ_WORK_H */
+
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
new file mode 100644
index 000000000..19ccbf483
--- /dev/null
+++ b/include/asm-generic/irqflags.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_IRQFLAGS_H
+#define __ASM_GENERIC_IRQFLAGS_H
+
+/*
+ * All architectures should implement at least the first two functions,
+ * usually inline assembly will be the best way.
+ */
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
+#endif
+
+/* read interrupt enabled status */
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
+#endif
+
+/* set interrupt enabled status */
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
+#endif
+
+/* get status and disable interrupts */
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = arch_local_save_flags();
+ arch_local_irq_restore(ARCH_IRQ_DISABLED);
+ return flags;
+}
+#endif
+
+/* test flags */
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return flags == ARCH_IRQ_DISABLED;
+}
+#endif
+
+/* unconditionally enable interrupts */
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
+{
+ arch_local_irq_restore(ARCH_IRQ_ENABLED);
+}
+#endif
+
+/* unconditionally disable interrupts */
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_restore(ARCH_IRQ_DISABLED);
+}
+#endif
+
+/* test hardware interrupt enable bit */
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+#endif
+
+#endif /* __ASM_GENERIC_IRQFLAGS_H */
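
To make the fallback scheme concrete, a hedged sketch of the minimum an architecture supplies before including this header; the CSR accessors are hypothetical:

/* illustrative <asm/irqflags.h> for a made-up architecture */
static inline unsigned long arch_local_save_flags(void)
{
	return demo_read_irq_csr();		/* hypothetical CSR read */
}
#define arch_local_save_flags arch_local_save_flags

static inline void arch_local_irq_restore(unsigned long flags)
{
	demo_write_irq_csr(flags);		/* hypothetical CSR write */
}
#define arch_local_irq_restore arch_local_irq_restore

#include <asm-generic/irqflags.h>
/* arch_local_irq_save()/enable()/disable() now fall out of the generic code */
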
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
new file mode 100644
index 000000000..2b10b31b0
--- /dev/null
+++ b/include/asm-generic/kdebug.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KDEBUG_H
+#define _ASM_GENERIC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_OOPS = 1,
+};
+
+#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
new file mode 100644
index 000000000..9f95b7b63
--- /dev/null
+++ b/include/asm-generic/kmap_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KMAP_TYPES_H
+#define _ASM_GENERIC_KMAP_TYPES_H
+
+#ifdef __WITH_KM_FENCE
+# define KM_TYPE_NR 41
+#else
+# define KM_TYPE_NR 20
+#endif
+
+#endif
diff --git a/include/asm-generic/kprobes.h b/include/asm-generic/kprobes.h
new file mode 100644
index 000000000..4a982089c
--- /dev/null
+++ b/include/asm-generic/kprobes.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KPROBES_H
+#define _ASM_GENERIC_KPROBES_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#ifdef CONFIG_KPROBES
+/*
+ * Blacklist generating macro. Use this macro to specify functions
+ * which are not to be probed.
+ */
+# define __NOKPROBE_SYMBOL(fname) \
+static unsigned long __used \
+ __attribute__((__section__("_kprobe_blacklist"))) \
+ _kbl_addr_##fname = (unsigned long)fname;
+# define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
+/* Use this to forbid a kprobes attach on very low level functions */
+# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define nokprobe_inline __always_inline
+#else
+# define NOKPROBE_SYMBOL(fname)
+# define __kprobes
+# define nokprobe_inline inline
+#endif
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+
+#endif /* _ASM_GENERIC_KPROBES_H */
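
A hedged usage example of the annotations defined above; the helper function is invented:

#include <linux/kprobes.h>

/* code reached from the kprobes exception path must itself not be probed */
static int demo_fault_helper(unsigned long addr)
{
	return addr != 0;
}
NOKPROBE_SYMBOL(demo_fault_helper);	/* records it in _kprobe_blacklist */
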
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 000000000..728e5c570
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#include <uapi/asm-generic/kvm_para.h>
+
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
+#endif
diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h
new file mode 100644
index 000000000..fef7a01e5
--- /dev/null
+++ b/include/asm-generic/linkage.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_LINKAGE_H
+#define __ASM_GENERIC_LINKAGE_H
+/*
+ * linux/linkage.h provides reasonable defaults.
+ * an architecture can override them by providing its own version.
+ */
+
+#endif /* __ASM_GENERIC_LINKAGE_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 000000000..fca7f1d84
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t. Which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set((&(l)->a),(i))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) local_set((l), local_read(l) + 1)
+#define __local_dec(l) local_set((l), local_read(l) - 1)
+#define __local_add(i,l) local_set((l), local_read(l) + (i))
+#define __local_sub(i,l) local_set((l), local_read(l) - (i))
+
+#endif /* _ASM_GENERIC_LOCAL_H */
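
A hedged example of the intended per-CPU usage pattern; the counter and function names are invented, and the read side tolerates the usual per-CPU statistics skew:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, demo_events);

static void demo_count_event(void)
{
	/* IRQ-safe on this CPU; assumes preemption is disabled or skew is OK */
	local_inc(this_cpu_ptr(&demo_events));
}

static long demo_total_events(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(per_cpu_ptr(&demo_events, cpu));
	return sum;
}
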
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000..765be0b7d
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t. Which is
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+ local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i) { LOCAL_INIT(i) }
+
+#define local64_read(l) local_read(&(l)->a)
+#define local64_set(l,i) local_set((&(l)->a),(i))
+#define local64_inc(l) local_inc(&(l)->a)
+#define local64_dec(l) local_dec(&(l)->a)
+#define local64_add(i,l) local_add((i),(&(l)->a))
+#define local64_sub(i,l) local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l) local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, i.e. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <linux/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+ atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i) { ATOMIC64_INIT(i) }
+
+#define local64_read(l) atomic64_read(&(l)->a)
+#define local64_set(l,i) atomic64_set((&(l)->a),(i))
+#define local64_inc(l) atomic64_inc(&(l)->a)
+#define local64_dec(l) atomic64_dec(&(l)->a)
+#define local64_add(i,l) atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l) atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, i.e. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
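local64_t follows the same pattern but guarantees a 64-bit counter even on 32-bit machines, where it falls back to atomic64_t as above. A small sketch, assuming one statistics block per RX queue with each queue serviced by a single CPU (the structure and field names are made up for illustration):

#include <asm/local64.h>

struct rxq_stats {
	local64_t bytes;	/* only the owning CPU updates this */
};

static void account_rx(struct rxq_stats *s, unsigned int len)
{
	local64_add(len, &s->bytes);	/* cheap and IRQ-safe on the owner CPU */
}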
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644
index 000000000..10cd4ffc6
--- /dev/null
+++ b/include/asm-generic/mcs_spinlock.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ * arch_mcs_spin_lock_contended(l)
+ * arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
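The MCS construction these hooks feed into is a queued lock: every waiter spins on a flag inside its own queue node instead of on the shared lock word, so a hand-off touches only the successor's cache line. The following is a condensed sketch of the idea, not the kernel's implementation; it is the textbook algorithm written with common kernel primitives:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/processor.h>

struct mcs_node {
	struct mcs_node *next;
	int locked;
};

static void mcs_lock(struct mcs_node **lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	node->next = NULL;
	node->locked = 0;

	prev = xchg(lock, node);		/* become the new queue tail */
	if (!prev)
		return;				/* queue was empty: lock taken */

	WRITE_ONCE(prev->next, node);		/* link behind the old tail */
	while (!smp_load_acquire(&node->locked))
		cpu_relax();			/* spin on our own cache line */
}

static void mcs_unlock(struct mcs_node **lock, struct mcs_node *node)
{
	struct mcs_node *next = READ_ONCE(node->next);

	if (!next) {
		/* No visible successor: try to empty the queue. */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();		/* successor is mid-enqueue */
	}
	smp_store_release(&next->locked, 1);	/* hand the lock over */
}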
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
new file mode 100644
index 000000000..7637fb46b
--- /dev/null
+++ b/include/asm-generic/memory_model.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MEMORY_MODEL_H
+#define __ASM_MEMORY_MODEL_H
+
+#include <linux/pfn.h>
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FLATMEM)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (0UL)
+#endif
+
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#ifndef arch_pfn_to_nid
+#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
+#endif
+
+#ifndef arch_local_page_offset
+#define arch_local_page_offset(pfn, nid) \
+ ((pfn) - NODE_DATA(nid)->node_start_pfn)
+#endif
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+/*
+ * supports 3 memory models: FLATMEM, DISCONTIGMEM and SPARSEMEM.
+ */
+#if defined(CONFIG_FLATMEM)
+
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+ ARCH_PFN_OFFSET)
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#define __pfn_to_page(pfn) \
+({ unsigned long __pfn = (pfn); \
+ unsigned long __nid = arch_pfn_to_nid(__pfn); \
+ NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
+})
+
+#define __page_to_pfn(pg) \
+({ const struct page *__pg = (pg); \
+ struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
+ (unsigned long)(__pg - __pgdat->node_mem_map) + \
+ __pgdat->node_start_pfn; \
+})
+
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
+
+/* memmap is virtually contiguous. */
+#define __pfn_to_page(pfn) (vmemmap + (pfn))
+#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
+
+#elif defined(CONFIG_SPARSEMEM)
+/*
+ * Note: section's mem_map is encoded to reflect its start_pfn.
+ * section[i].section_mem_map == mem_map's address - start_pfn;
+ */
+#define __page_to_pfn(pg) \
+({ const struct page *__pg = (pg); \
+ int __sec = page_to_section(__pg); \
+ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
+})
+
+#define __pfn_to_page(pfn) \
+({ unsigned long __pfn = (pfn); \
+ struct mem_section *__sec = __pfn_to_section(__pfn); \
+ __section_mem_map_addr(__sec) + __pfn; \
+})
+#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) PHYS_PFN(paddr)
+#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
+
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
+
+#endif /* __ASSEMBLY__ */
+
+#endif
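Under FLATMEM the conversions above are plain offset arithmetic into the single mem_map[] array. A worked example, assuming 4 KiB pages and ARCH_PFN_OFFSET == 0 (the address and the function name are purely illustrative):

#include <linux/mm.h>		/* struct page, mem_map, pfn_to_page() */

static struct page *example_phys_to_page(void)
{
	phys_addr_t pa = 0x12345678;

	/* __phys_to_pfn(): 0x12345678 >> 12 == 0x12345 */
	unsigned long pfn = __phys_to_pfn(pa);

	/* FLATMEM __pfn_to_page(): simply &mem_map[pfn - ARCH_PFN_OFFSET] */
	return pfn_to_page(pfn);
}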
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000..5ff0e5193
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
+/*
+ * Architecture specific mm hooks
+ */
+
+#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
+#define _ASM_GENERIC_MM_ARCH_HOOKS_H
+
+/*
+ * This file should be included through arch/../include/asm/Kbuild for
+ * architectures which don't need specific mm hooks.
+ *
+ * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
+ * are used.
+ */
+
+#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
new file mode 100644
index 000000000..8ac4e68a1
--- /dev/null
+++ b/include/asm-generic/mm_hooks.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
+ * and arch_unmap to be included in asm-FOO/mmu_context.h for any
+ * arch FOO which doesn't need to hook these.
+ */
+#ifndef _ASM_GENERIC_MM_HOOKS_H
+#define _ASM_GENERIC_MM_HOOKS_H
+
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ /* by default, allow everything */
+ return true;
+}
+#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
new file mode 100644
index 000000000..061838037
--- /dev/null
+++ b/include/asm-generic/mmu.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMU_H
+#define __ASM_GENERIC_MMU_H
+
+/*
+ * This is the mmu.h header for nommu implementations.
+ * Architectures with an MMU need something more complex.
+ */
+#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long end_brk;
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+#endif
+
+#endif /* __ASM_GENERIC_MMU_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
new file mode 100644
index 000000000..6be9106fb
--- /dev/null
+++ b/include/asm-generic/mmu_context.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMU_CONTEXT_H
+#define __ASM_GENERIC_MMU_CONTEXT_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+
+#include <asm-generic/mm_hooks.h>
+
+struct task_struct;
+struct mm_struct;
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline void deactivate_mm(struct task_struct *task,
+ struct mm_struct *mm)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+}
+
+#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
new file mode 100644
index 000000000..98e1541b7
--- /dev/null
+++ b/include/asm-generic/module.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MODULE_H
+#define __ASM_GENERIC_MODULE_H
+
+/*
+ * Many architectures just need a simple module
+ * loader without arch specific data.
+ */
+#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
+struct mod_arch_specific
+{
+};
+#endif
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Phdr Elf64_Phdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Addr Elf64_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf64_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf64_Rela
+#endif
+#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
+#define ELF_R_SYM(X) ELF64_R_SYM(X)
+
+#else /* CONFIG_64BIT */
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Phdr Elf32_Phdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Dyn Elf32_Dyn
+#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Addr Elf32_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel Elf32_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela Elf32_Rela
+#endif
+#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
+#define ELF_R_SYM(X) ELF32_R_SYM(X)
+#endif
+
+#endif /* __ASM_GENERIC_MODULE_H */
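The point of these Elf_* aliases is that an architecture's module loader can be written once and compile correctly for either ELF32 or ELF64. Below is a skeleton of the usual shape of apply_relocate_add(), the hook declared in <linux/moduleloader.h>; the actual relocation cases are arch-specific and only hinted at by a comment:

#include <linux/module.h>
#include <linux/moduleloader.h>

int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *symtab = (void *)sechdrs[symindex].sh_addr;
	unsigned int i;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* location being patched and the value to patch in */
		void *loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			    + rela[i].r_offset;
		Elf_Sym *sym = symtab + ELF_R_SYM(rela[i].r_info);
		unsigned long val = sym->st_value + rela[i].r_addend;

		switch (ELF_R_TYPE(rela[i].r_info)) {
		/* arch-specific cases write val into loc here */
		default:
			pr_err("%s: unsupported relocation %lu at %p (val %#lx)\n",
			       me->name,
			       (unsigned long)ELF_R_TYPE(rela[i].r_info),
			       loc, val);
			return -ENOEXEC;
		}
	}
	return 0;
}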
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
new file mode 100644
index 000000000..e6795f088
--- /dev/null
+++ b/include/asm-generic/msi.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MSI_H
+#define __ASM_GENERIC_MSI_H
+
+#include <linux/types.h>
+
+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
+#endif
+
+struct msi_desc;
+
+/**
+ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
+ * @desc: Pointer to msi descriptor
+ * @hwirq: Associated hw interrupt number in the domain
+ * @scratchpad: Storage for implementation specific scratch data
+ *
+ * Architectures can provide their own implementation by not including
+ * asm-generic/msi.h into their arch specific header file.
+ */
+typedef struct msi_alloc_info {
+ struct msi_desc *desc;
+ irq_hw_number_t hwirq;
+ union {
+ unsigned long ul;
+ void *ptr;
+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS];
+} msi_alloc_info_t;
+
+#define GENERIC_MSI_DOMAIN_OPS 1
+
+#endif
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
new file mode 100644
index 000000000..27bf3377b
--- /dev/null
+++ b/include/asm-generic/page.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PAGE_H
+#define __ASM_GENERIC_PAGE_H
+/*
+ * Generic page.h implementation, for NOMMU architectures.
+ * This provides the dummy definitions for the memory management.
+ */
+
+#ifdef CONFIG_MMU
+#error need to provide a real asm/page.h
+#endif
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pmd[16];
+} pmd_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((&x)->pmd[0])
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
+#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
+#else
+#define PAGE_OFFSET (0)
+#endif
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long) (x)))
+#define __pa(x) ((unsigned long) (x))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
+
+#ifndef page_to_phys
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#endif
+
+#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+ ((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_GENERIC_PAGE_H */
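With PAGE_SHIFT fixed at 12, PAGE_SIZE is 4096 (0x1000) and PAGE_MASK is ~0xfff, so splitting an address into its page and in-page parts is a single mask. A short worked illustration (the address and function are made up; since __pa() is the identity here, virt_to_pfn() is just a shift):

#include <linux/mm.h>
#include <linux/printk.h>

static void page_math_example(void)
{
	unsigned long addr = 0x403a21UL;

	unsigned long start = addr & PAGE_MASK;		 /* 0x403000 */
	unsigned long off   = addr & ~PAGE_MASK;	 /* 0x000a21 */
	unsigned long pfn   = virt_to_pfn((void *)addr); /* 0x403    */

	pr_info("page %#lx, offset %#lx, pfn %#lx\n", start, off, pfn);
}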
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
new file mode 100644
index 000000000..8d3009dd2
--- /dev/null
+++ b/include/asm-generic/param.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARAM_H
+#define __ASM_GENERIC_PARAM_H
+
+#include <uapi/asm-generic/param.h>
+
+# undef HZ
+# define HZ CONFIG_HZ /* Internal kernel timer frequency */
+# define USER_HZ 100 /* some user interfaces are */
+# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
+#endif /* __ASM_GENERIC_PARAM_H */
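HZ is the kernel's internal tick rate while USER_HZ is the fixed rate that legacy interfaces such as times(2) report in, so any value crossing that boundary has to be rescaled; the kernel's own converters live in <linux/jiffies.h> (for example jiffies_to_clock_t()). The open-coded version below is only meant to show the relationship and ignores overflow:

#include <linux/types.h>
#include <asm/param.h>

/* e.g. with CONFIG_HZ == 250: 500 internal ticks == 2 s == 200 user ticks */
static clock_t example_user_ticks(unsigned long internal_ticks)
{
	return (clock_t)(internal_ticks * USER_HZ / HZ);
}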
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
new file mode 100644
index 000000000..483991d61
--- /dev/null
+++ b/include/asm-generic/parport.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PARPORT_H
+#define __ASM_GENERIC_PARPORT_H
+
+/*
+ * An ISA bus may have i8255 parallel ports at well-known
+ * locations in the I/O space, which are scanned by
+ * parport_pc_find_isa_ports.
+ *
+ * Without ISA support, the driver will only attach
+ * to devices on the PCI bus.
+ */
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+#ifdef CONFIG_ISA
+ return parport_pc_find_isa_ports(autoirq, autodma);
+#else
+ return 0;
+#endif
+}
+
+#endif /* __ASM_GENERIC_PARPORT_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
new file mode 100644
index 000000000..6bb3cd3d6
--- /dev/null
+++ b/include/asm-generic/pci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/include/asm-generic/pci.h
+ *
+ * Copyright (C) 2003 Russell King
+ */
+#ifndef _ASM_GENERIC_PCI_H
+#define _ASM_GENERIC_PCI_H
+
+#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? 15 : 14;
+}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+
+#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
new file mode 100644
index 000000000..d4f16dcc2
--- /dev/null
+++ b/include/asm-generic/pci_iomap.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Generic I/O port emulation.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_PCI_IOMAP_H
+#define __ASM_GENERIC_PCI_IOMAP_H
+
+struct pci_dev;
+#ifdef CONFIG_PCI
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
+/* Create a virtual mapping cookie for a port on a given PCI device.
+ * Do not call this directly; it exists to make it easier for architectures
+ * to override */
+#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
+extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+ unsigned int nr);
+#else
+#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
+#endif
+
+#elif defined(CONFIG_GENERIC_PCI_IOMAP)
+static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
+
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
+static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
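A typical consumer maps one BAR at probe time and then uses the regular ioread*/iowrite* accessors on the returned cookie, unmapping it again on the error or remove path. A minimal sketch; the BAR number and the 0x10 register offset are invented for illustration:

#include <linux/pci.h>
#include <linux/io.h>

static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 ver;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	regs = pci_iomap(pdev, 0, 0);	/* maxlen 0: map all of BAR 0 */
	if (!regs)
		return -ENOMEM;

	ver = ioread32(regs + 0x10);	/* hypothetical version register */
	dev_info(&pdev->dev, "hw version %#x\n", ver);

	pci_iounmap(pdev, regs);
	return 0;
}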
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
new file mode 100644
index 000000000..1817a8415
--- /dev/null
+++ b/include/asm-generic/percpu.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/percpu-defs.h>
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
+
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
+ */
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#endif /* SMP */
+
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
+
+#define raw_cpu_generic_read(pcp) \
+({ \
+ *raw_cpu_ptr(&(pcp)); \
+})
+
+#define raw_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *raw_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#define raw_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ \
+ *__p += val; \
+ *__p; \
+})
+
+#define raw_cpu_generic_xchg(pcp, nval) \
+({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) __ret; \
+ __ret = *__p; \
+ *__p = nval; \
+ __ret; \
+})
+
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) __ret; \
+ __ret = *__p; \
+ if (__ret == (oval)) \
+ *__p = nval; \
+ __ret; \
+})
+
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
+ int __ret = 0; \
+ if (*__p1 == (oval1) && *__p2 == (oval2)) { \
+ *__p1 = nval1; \
+ *__p2 = nval2; \
+ __ret = 1; \
+ } \
+ (__ret); \
+})
+
+#define __this_cpu_generic_read_nopreempt(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ preempt_disable_notrace(); \
+ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ preempt_enable_notrace(); \
+ __ret; \
+})
+
+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ if (__native_word(pcp)) \
+ __ret = __this_cpu_generic_read_nopreempt(pcp); \
+ else \
+ __ret = __this_cpu_generic_read_noirq(pcp); \
+ __ret; \
+})
+
+#define this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ raw_cpu_generic_to_op(pcp, val, op); \
+ raw_local_irq_restore(__flags); \
+} while (0)
+
+
+#define this_cpu_generic_add_return(pcp, val) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_add_return(pcp, val); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_xchg(pcp, nval) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_xchg(pcp, nval); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ \
+ int __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
+ oval1, oval2, nval1, nval2); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp)
+#endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp)
+#endif
+
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp)
+#endif
+
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#endif /* _ASM_GENERIC_PERCPU_H_ */
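Code normally reaches these operations through the size-generic this_cpu_*() and raw_cpu_*() wrappers in <linux/percpu-defs.h>, which pick the _1/_2/_4/_8 variant above based on sizeof() of the variable. A small usage sketch (the counter name is illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);

static void count_packet(void)
{
	/* Safe against IRQs and preemption on this CPU; falls back to
	 * this_cpu_generic_to_op() unless the arch supplies a cheaper
	 * this_cpu_add_4()/_8(). */
	this_cpu_inc(pkt_count);
}

static unsigned long snapshot_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += per_cpu(pkt_count, cpu);
	return sum;
}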
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
new file mode 100644
index 000000000..948714c15
--- /dev/null
+++ b/include/asm-generic/pgalloc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_PGALLOC_H
+#define __ASM_GENERIC_PGALLOC_H
+/*
+ * an empty file is enough for a nommu architecture
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/pgalloc.h
+#endif
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* __ASM_GENERIC_PGALLOC_H */
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
new file mode 100644
index 000000000..1d6dd38c0
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOP4D_HACK_H
+#define _PGTABLE_NOP4D_HACK_H
+
+#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
+
+#define __PAGETABLE_PUD_FOLDED 1
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT PGDIR_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud) do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd;
+}
+
+#define pud_val(x) (pgd_val((x).pgd))
+#define __pud(x) ((pud_t) { __pgd(x) })
+
+#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
new file mode 100644
index 000000000..04cb91379
--- /dev/null
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOP4D_H
+#define _PGTABLE_NOP4D_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_P4D_FOLDED 1
+
+typedef struct { pgd_t pgd; } p4d_t;
+
+#define P4D_SHIFT PGDIR_SHIFT
+#define MAX_PTRS_PER_P4D 1
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the p4d is never bad, and a p4d always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
+
+#define pgd_populate(mm, pgd, p4d) do { } while (0)
+/*
+ * (p4ds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_p4d((p4d_t *)(pgdptr), (p4d_t) { pgdval })
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ return (p4d_t *)pgd;
+}
+
+#define p4d_val(x) (pgd_val((x).pgd))
+#define __p4d(x) ((p4d_t) { __pgd(x) })
+
+#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+
+/*
+ * allocating and freeing a p4d is trivial: the 1-entry p4d is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define p4d_alloc_one(mm, address) NULL
+#define p4d_free(mm, x) do { } while (0)
+#define __p4d_free_tlb(tlb, x, a) do { } while (0)
+
+#undef p4d_addr_end
+#define p4d_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOP4D_H */
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 000000000..b85b8271a
--- /dev/null
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPMD_H
+#define _PGTABLE_NOPMD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nopud.h>
+
+struct mm_struct;
+
+#define __PAGETABLE_PMD_FOLDED 1
+
+/*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+ * without casting.
+ */
+typedef struct { pud_t pud; } pmd_t;
+
+#define PMD_SHIFT PUD_SHIFT
+#define PTRS_PER_PMD 1
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
+static inline int pud_present(pud_t pud) { return 1; }
+static inline void pud_clear(pud_t *pud) { }
+#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
+
+#define pud_populate(mm, pmd, pte) do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
+{
+ return (pmd_t *)pud;
+}
+
+#define pmd_val(x) (pud_val((x).pud))
+#define __pmd(x) ((pmd_t) { __pud(x) } )
+
+#define pud_page(pud) (pmd_page((pmd_t){ pud }))
+#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pud, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one(mm, address) NULL
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
+#define __pmd_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PGTABLE_NOPMD_H */
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 000000000..9bef475db
--- /dev/null
+++ b/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nop4d-hack.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
+
+#define __PAGETABLE_PUD_FOLDED 1
+
+/*
+ * Having the pud type consist of a p4d gets the size right, and allows
+ * us to conceptually access the p4d entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { p4d_t p4d; } pud_t;
+
+#define PUD_SHIFT P4D_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "p4d_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the p4d entry)
+ */
+static inline int p4d_none(p4d_t p4d) { return 0; }
+static inline int p4d_bad(p4d_t p4d) { return 0; }
+static inline int p4d_present(p4d_t p4d) { return 1; }
+static inline void p4d_clear(p4d_t *p4d) { }
+#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
+
+#define p4d_populate(mm, p4d, pud) do { } while (0)
+/*
+ * (puds are folded into p4ds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_p4d(p4dptr, p4dval) set_pud((pud_t *)(p4dptr), (pud_t) { p4dval })
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return (pud_t *)p4d;
+}
+
+#define pud_val(x) (p4d_val((x).p4d))
+#define __pud(x) ((pud_t) { __p4d(x) })
+
+#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
+#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the p4d, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, a) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* !__ARCH_USE_5LEVEL_HACK */
+#endif /* _PGTABLE_NOPUD_H */
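The practical effect of these folded levels is that a full five-level walk compiles unchanged on architectures with fewer levels: each folded *_offset() helper just returns its argument cast to the next type, so the extra steps cost nothing. A sketch of a walk written against the full API (no locking, no huge-page handling; the function name is illustrative):

#include <linux/mm.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* == (p4d_t *)pgd when P4D is folded */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);	/* == (pud_t *)p4d when PUD is folded */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);	/* == (pmd_t *)pud when PMD is folded */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}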
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
new file mode 100644
index 000000000..1544331be
--- /dev/null
+++ b/include/asm-generic/pgtable.h
@@ -0,0 +1,1155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#include <linux/pfn.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Despite being relevant only to THP, this API is called from generic rmap code
+ * under PageTransHuge(), hence needs a dummy implementation for !THP
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ pmd_clear(pmdp);
+ return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_huge_get_and_clear(mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ pte = ptep_get_and_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTEs which are already
+ * not present, or during address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with the CPU which sets these bits, and a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ BUILD_BUG();
+ return 0;
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void p4d_clear_bad(p4d_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
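These *_addr_end() macros and *_none_or_clear_bad() helpers are the building blocks of the generic page-table walkers in mm/: each level iterates over [addr, end) in level-sized steps and skips (or reports and clears) empty or corrupt entries before descending. A condensed sketch of that loop shape for one level, simplified and without locking:

static void example_walk_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);	/* never goes past 'end' */
		if (pmd_none_or_clear_bad(pmd))
			continue;		/* nothing mapped here */
		/* ... descend to the PTE level for [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}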
+
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(mm, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ __ptep_modify_prot_commit(mm, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
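+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: the
+ * transaction is used with the pte lock held, e.g. to clear the write bit
+ * without losing concurrent hardware A/D updates (mm/mprotect.c uses this
+ * pattern).
+ *
+ *	spinlock_t *ptl;
+ *	pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ *	pte_t old = ptep_modify_prot_start(mm, addr, ptep);
+ *
+ *	ptep_modify_prot_commit(mm, addr, ptep, pte_wrprotect(old));
+ *	pte_unmap_unlock(ptep, ptl);
+ */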
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
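+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: a
+ * batch of PTE updates bracketed by the lazy MMU hooks, with the page
+ * table lock held for the whole window as required above (addr, end, ptep
+ * and mm are assumed to be set up by the caller).
+ *
+ *	arch_enter_lazy_mmu_mode();
+ *	for (; addr != end; addr += PAGE_SIZE, ptep++)
+ *		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
+ *	arch_leave_lazy_mmu_mode();
+ */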
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * memory type of pfn mappings specified by remap_pfn_range() and
+ * vm_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vm_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case pfn, size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
+#endif
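+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: the
+ * expected call pattern, roughly as remap_pfn_range() and the unmap path
+ * use it (exact call sites vary by kernel version).
+ *
+ *	if (track_pfn_remap(vma, &prot, pfn, addr, size))
+ *		return -EINVAL;
+ *	... establish the mapping ...
+ *
+ * and on teardown:
+ *
+ *	untrack_pfn(vma, pfn, size);
+ */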
+
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+ * Depend on compiler for an atomic pmd read. NOTE: this is
+ * only going to work, if the pmdval_t isn't larger than
+ * an unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_sem in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd were none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above is also needed when THP is disabled, because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+ * pmd_read_atomic is allowed to return a non-atomic pmdval
+ * (for example pointing to an hugepage that has never been
+ * mapped in the pmd). The below checks will only care about
+ * the low part of the pmd with 32bit PAE x86 anyway, with the
+ * exception of pmd_none(). So the important thing is that if
+ * the low part of the pmd is found null, the high part will
+ * also be null, or the pmd_none() check below would be
+ * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ /*
+ * !pmd_present() checks for pmd migration entries
+ *
+ * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
+ * But using that requires moving the current function and pmd_trans_unstable()
+ * to linux/swapops.h to resolve the dependency, which is too much code movement.
+ *
+ * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+ * because !pmd_present() pages can only be under migration not swapped
+ * out.
+ *
+ * pmd_none() is preserved for future condition checks on pmd migration
+ * entries and to avoid confusion with this function's name, although it is
+ * redundant with !pmd_present().
+ */
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+ (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and they want to
+ * walk ptes while holding the mmap sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run a pmd_trans_unstable before walking the ptes after
+ * split_huge_page_pmd returns (because it may have run when the pmd
+ * became null, but then a page fault can map in a THP and not a
+ * regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
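+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: a
+ * pte walker holding mmap_sem for read re-checks the pmd before mapping
+ * the pte page, as e.g. mm/mincore.c and mm/madvise.c do.
+ *
+ *	spinlock_t *ptl;
+ *	pte_t *pte;
+ *
+ *	if (pmd_trans_unstable(pmd))
+ *		return 0;
+ *	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ *	... walk the ptes for [addr, end) ...
+ *	pte_unmap_unlock(pte, ptl);
+ */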
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the bit is only
+ * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
+ * marked _PAGE_PROTNONE, so by default implement the helper as "always no". It
+ * is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. It can also help optimize the normal TLB flush in the THP
+ * regime: the stock flush_tlb_range() typically has an optimization to nuke
+ * the entire TLB if the flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single THP flush would
+ * invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that can be relied on until the architectures
+ * define these flags on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+/*
+ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
+ * with physical address space extension, but falls back to
+ * BITS_PER_LONG otherwise.
+ */
+#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+#endif
+
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
new file mode 100644
index 000000000..c3046c920
--- /dev/null
+++ b/include/asm-generic/preempt.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_ENABLED (0)
+
+static __always_inline int preempt_count(void)
+{
+ return READ_ONCE(current_thread_info()->preempt_count);
+}
+
+static __always_inline volatile int *preempt_count_ptr(void)
+{
+ return &current_thread_info()->preempt_count;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ *preempt_count_ptr() = pc;
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static __always_inline void set_preempt_need_resched(void)
+{
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return false;
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ *preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ *preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ /*
+ * Because load-store architectures cannot do per-cpu atomic
+ * operations, we cannot use PREEMPT_NEED_RESCHED; it might get
+ * lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(int preempt_offset)
+{
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
+}
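+
+/*
+ * Illustrative sketch, not a definition provided by this header: with
+ * CONFIG_PREEMPT enabled, the generic preempt_disable()/preempt_enable()
+ * in <linux/preempt.h> expand roughly to the following on top of the
+ * primitives above.
+ *
+ *	#define preempt_disable() \
+ *	do { \
+ *		__preempt_count_add(1); \
+ *		barrier(); \
+ *	} while (0)
+ *
+ *	#define preempt_enable() \
+ *	do { \
+ *		barrier(); \
+ *		if (unlikely(__preempt_count_dec_and_test())) \
+ *			__preempt_schedule(); \
+ *	} while (0)
+ */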
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 000000000..82e674f6b
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
+/*
+ * Common low level (register) ptrace helpers
+ *
+ * Copyright 2004-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_GENERIC_PTRACE_H__
+#define __ASM_GENERIC_PTRACE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Helpers for working with the instruction pointer */
+#ifndef GET_IP
+#define GET_IP(regs) ((regs)->pc)
+#endif
+#ifndef SET_IP
+#define SET_IP(regs, val) (GET_IP(regs) = (val))
+#endif
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return GET_IP(regs);
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_IP(regs, val);
+}
+
+#ifndef profile_pc
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+/* Helpers for working with the user stack pointer */
+#ifndef GET_USP
+#define GET_USP(regs) ((regs)->usp)
+#endif
+#ifndef SET_USP
+#define SET_USP(regs, val) (GET_USP(regs) = (val))
+#endif
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return GET_USP(regs);
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_USP(regs, val);
+}
+
+/* Helpers for working with the frame pointer */
+#ifndef GET_FP
+#define GET_FP(regs) ((regs)->fp)
+#endif
+#ifndef SET_FP
+#define SET_FP(regs, val) (GET_FP(regs) = (val))
+#endif
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return GET_FP(regs);
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_FP(regs, val);
+}
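+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: an
+ * architecture whose pt_regs uses different field names overrides the
+ * accessors before including this file, e.g. in its asm/ptrace.h (the
+ * field names below are made up for the example):
+ *
+ *	#define GET_IP(regs)	((regs)->epc)
+ *	#define GET_USP(regs)	((regs)->sp)
+ *	#define GET_FP(regs)	((regs)->s0)
+ *	#include <asm-generic/ptrace.h>
+ */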
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
new file mode 100644
index 000000000..0f7062bd5
--- /dev/null
+++ b/include/asm-generic/qrwlock.h
@@ -0,0 +1,138 @@
+/*
+ * Queue read/write lock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QRWLOCK_H
+#define __ASM_GENERIC_QRWLOCK_H
+
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+#include <asm-generic/qrwlock_types.h>
+
+/*
+ * Writer states & reader shift and bias.
+ */
+#define _QW_WAITING 0x100 /* A writer is waiting */
+#define _QW_LOCKED 0x0ff /* A writer holds the lock */
+#define _QW_WMASK 0x1ff /* Writer mask */
+#define _QR_SHIFT 9 /* Reader count shift */
+#define _QR_BIAS (1U << _QR_SHIFT)
+
+/*
+ * External function declarations
+ */
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
+
+/**
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queued_read_trylock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (likely(!(cnts & _QW_WMASK))) {
+ cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return 1;
+ atomic_sub(_QR_BIAS, &lock->cnts);
+ }
+ return 0;
+}
+
+/**
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static inline int queued_write_trylock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_read(&lock->cnts);
+ if (unlikely(cnts))
+ return 0;
+
+ return likely(atomic_cmpxchg_acquire(&lock->cnts,
+ cnts, cnts | _QW_LOCKED) == cnts);
+}
+/**
+ * queued_read_lock - acquire read lock of a queue rwlock
+ * @lock: Pointer to queue rwlock structure
+ */
+static inline void queued_read_lock(struct qrwlock *lock)
+{
+ u32 cnts;
+
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
+ if (likely(!(cnts & _QW_WMASK)))
+ return;
+
+ /* The slowpath will decrement the reader count, if necessary. */
+ queued_read_lock_slowpath(lock);
+}
+
+/**
+ * queued_write_lock - acquire write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queued_write_lock(struct qrwlock *lock)
+{
+ /* Optimize for the unfair lock case where the fair flag is 0. */
+ if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+ return;
+
+ queued_write_lock_slowpath(lock);
+}
+
+/**
+ * queued_read_unlock - release read lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queued_read_unlock(struct qrwlock *lock)
+{
+ /*
+ * Atomically decrement the reader count
+ */
+ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
+}
+
+/**
+ * queued_write_unlock - release write lock of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ */
+static inline void queued_write_unlock(struct qrwlock *lock)
+{
+ smp_store_release(&lock->wlocked, 0);
+}
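+
+/*
+ * Illustrative usage sketch, not a definition provided by this header:
+ * these are normally reached through read_lock()/write_lock() via the
+ * arch_* remapping below; readers may run concurrently, a writer
+ * excludes everyone.
+ *
+ *	queued_read_lock(&lock);
+ *	... read shared data ...
+ *	queued_read_unlock(&lock);
+ *
+ *	queued_write_lock(&lock);
+ *	... modify shared data ...
+ *	queued_write_unlock(&lock);
+ */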
+
+/*
+ * Remapping rwlock architecture specific functions to the corresponding
+ * queue rwlock functions.
+ */
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
+
+#endif /* __ASM_GENERIC_QRWLOCK_H */
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
new file mode 100644
index 000000000..c36f1d5a2
--- /dev/null
+++ b/include/asm-generic/qrwlock_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_QRWLOCK_TYPES_H
+#define __ASM_GENERIC_QRWLOCK_TYPES_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/spinlock_types.h>
+
+/*
+ * The queue read/write lock data structure
+ */
+
+typedef struct qrwlock {
+ union {
+ atomic_t cnts;
+ struct {
+#ifdef __LITTLE_ENDIAN
+ u8 wlocked; /* Locked for write? */
+ u8 __lstate[3];
+#else
+ u8 __lstate[3];
+ u8 wlocked; /* Locked for write? */
+#endif
+ };
+ };
+ arch_spinlock_t wait_lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { \
+ { .cnts = ATOMIC_INIT(0), }, \
+ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+}
+
+#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000..9cc457597
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,123 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
+ *
+ * Authors: Waiman Long <waiman.long@hpe.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ /*
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
+ */
+ return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked wrt the lockref code, to prevent the lockref code from stealing
+ * the lock and changing things underneath it. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!atomic_read(&lock->val) &&
+ (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val;
+
+ val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
+ if (likely(val == 0))
+ return;
+ queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * unlock() needs release semantics:
+ */
+ smp_store_release(&lock->locked, 0);
+}
+#endif
+
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
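+
+/*
+ * Illustrative usage sketch, not a definition provided by this header:
+ * these are normally reached through spin_lock()/spin_unlock() via the
+ * arch_* remapping below.
+ *
+ *	queued_spin_lock(&lock);
+ *	... critical section ...
+ *	queued_spin_unlock(&lock);
+ *
+ * or, when blocking is not acceptable:
+ *
+ *	if (queued_spin_trylock(&lock)) {
+ *		... critical section ...
+ *		queued_spin_unlock(&lock);
+ *	}
+ */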
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000..d10f1e7d6
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,112 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+ union {
+ atomic_t val;
+
+ /*
+ * By using the whole 2nd least significant byte for the
+ * pending bit, we can allow better optimization of the lock
+ * acquisition for the pending bit holder.
+ */
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 locked;
+ u8 pending;
+ };
+ struct {
+ u16 locked_pending;
+ u16 tail;
+ };
+#else
+ struct {
+ u16 tail;
+ u16 locked_pending;
+ };
+ struct {
+ u8 reserved[2];
+ u8 pending;
+ u8 locked;
+ };
+#endif
+ };
+} arch_spinlock_t;
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
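+
+/*
+ * Worked example (illustrative only, not part of the interface above),
+ * assuming NR_CPUS < 16K so that _Q_PENDING_BITS == 8: the tail for MCS
+ * node index 1 on CPU 3 is encoded as
+ *
+ *	tail  = (3 + 1) << _Q_TAIL_CPU_OFFSET;		(bits 18-31)
+ *	tail |= 1 << _Q_TAIL_IDX_OFFSET;		(bits 16-17)
+ *
+ * which is roughly how the slowpath in kernel/locking/qspinlock.c builds
+ * the tail value it stores into lock->val under _Q_TAIL_MASK.
+ */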
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
new file mode 100644
index 000000000..8874f681b
--- /dev/null
+++ b/include/asm-generic/resource.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_RESOURCE_H
+#define _ASM_GENERIC_RESOURCE_H
+
+#include <uapi/asm-generic/resource.h>
+
+
+/*
+ * boot-time rlimit defaults for the init task:
+ */
+#define INIT_RLIMITS \
+{ \
+ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \
+ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
+ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_NPROC] = { 0, 0 }, \
+ [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
+ [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
+ [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_SIGPENDING] = { 0, 0 }, \
+ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
+ [RLIMIT_NICE] = { 0, 0 }, \
+ [RLIMIT_RTPRIO] = { 0, 0 }, \
+ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+#endif
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
new file mode 100644
index 000000000..93e67a055
--- /dev/null
+++ b/include/asm-generic/rwsem.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_RWSEM_H
+#define _ASM_GENERIC_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
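+
+/*
+ * Worked example (illustrative only), using the 64-bit values above:
+ *
+ *	unlocked:		0x0000000000000000
+ *	one reader active:	0x0000000000000001
+ *	two readers active:	0x0000000000000002
+ *	one writer active:	0xffffffff00000001
+ *
+ * The slow paths add RWSEM_WAITING_BIAS while waiters are queued, so a
+ * negative count after a fast-path release means someone may need waking,
+ * which is what the rwsem_wake() checks below test for.
+ */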
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+ if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+ return -EINTR;
+ return 0;
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ tmp = atomic_long_dec_return_release(&sem->count);
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
+ &sem->count) < 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ long tmp;
+
+ /*
+ * When downgrading from exclusive to shared ownership,
+ * anything inside the write-locked region cannot leak
+ * into the read side. In contrast, anything in the
+ * read-locked region is ok to be re-ordered into the
+ * write side. As such, rely on RELEASE semantics.
+ */
+ tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_GENERIC_RWSEM_H */
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
new file mode 100644
index 000000000..e74072d23
--- /dev/null
+++ b/include/asm-generic/seccomp.h
@@ -0,0 +1,46 @@
+/*
+ * include/asm-generic/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_GENERIC_SECCOMP_H
+#define _ASM_GENERIC_SECCOMP_H
+
+#include <linux/unistd.h>
+
+#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32)
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#ifndef __NR_seccomp_sigreturn_32
+#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn
+#endif
+#endif /* CONFIG_COMPAT && ! already defined */
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#ifndef __NR_seccomp_sigreturn
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+#endif
+
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int mode1_syscalls_32[] = {
+ __NR_seccomp_read_32, __NR_seccomp_write_32,
+ __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+ 0, /* null terminated */
+ };
+ return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
+#endif /* _ASM_GENERIC_SECCOMP_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
new file mode 100644
index 000000000..ea5987bb0
--- /dev/null
+++ b/include/asm-generic/sections.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_SECTIONS_H_
+#define _ASM_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/*
+ * Usage guidelines:
+ * _text, _data: architecture specific, don't use them in arch-independent code
+ * [_stext, _etext]: contains .text.* sections, may also contain .rodata.*
+ * and/or .init.* sections
+ * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
+ * and/or .init.* sections.
+ * [__start_rodata, __end_rodata]: contains .rodata.* sections
+ * [__start_ro_after_init, __end_ro_after_init]:
+ * contains .data..ro_after_init section
+ * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
+ * may be out of this range on some architectures.
+ * [_sinittext, _einittext]: contains .init.text.* sections
+ * [__bss_start, __bss_stop]: contains BSS sections
+ *
+ * Following global variables are optional and may be unavailable on some
+ * architectures and/or kernel configurations.
+ * _text, _data
+ * __kprobes_text_start, __kprobes_text_end
+ * __entry_text_start, __entry_text_end
+ * __ctors_start, __ctors_end
+ * __irqentry_text_start, __irqentry_text_end
+ * __softirqentry_text_start, __softirqentry_text_end
+ * __start_opd, __end_opd
+ */
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
+extern char _end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __entry_text_start[], __entry_text_end[];
+extern char __start_rodata[], __end_rodata[];
+extern char __irqentry_text_start[], __irqentry_text_end[];
+extern char __softirqentry_text_start[], __softirqentry_text_end[];
+extern char __start_once[], __end_once[];
+
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
+/* Start and end of .opd section - used for function descriptors. */
+extern char __start_opd[], __end_opd[];
+
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
+extern __visible const void __nosave_begin, __nosave_end;
+
+/* Function descriptor handling (if any). Override in asm/sections.h */
+#ifndef dereference_function_descriptor
+#define dereference_function_descriptor(p) (p)
+#define dereference_kernel_function_descriptor(p) (p)
+#endif
+
+/* random extra sections (if any). Override
+ * in asm/sections.h */
+#ifndef arch_is_kernel_text
+static inline int arch_is_kernel_text(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_is_kernel_data
+static inline int arch_is_kernel_data(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
+/**
+ * memory_contains - checks if an object is contained within a memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the memory region defined by @begin and @end, false
+ * otherwise.
+ */
+static inline bool memory_contains(void *begin, void *end, void *virt,
+ size_t size)
+{
+ return virt >= begin && virt + size <= end;
+}
+
+/**
+ * memory_intersects - checks if the region occupied by an object intersects
+ * with another memory region
+ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the region specified by @begin and @end, false otherwise.
+ */
+static inline bool memory_intersects(void *begin, void *end, void *virt,
+ size_t size)
+{
+ void *vend = virt + size;
+
+ return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+}
+
+/**
+ * init_section_contains - checks if an object is contained within the init
+ * section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if the object specified by @virt and @size is entirely
+ * contained within the init section, false otherwise.
+ */
+static inline bool init_section_contains(void *virt, size_t size)
+{
+ return memory_contains(__init_begin, __init_end, virt, size);
+}
+
+/**
+ * init_section_intersects - checks if the region occupied by an object
+ * intersects with the init section
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+ *
+ * Returns: true if an object's memory region, specified by @virt and @size,
+ * intersects with the init section, false otherwise.
+ */
+static inline bool init_section_intersects(void *virt, size_t size)
+{
+ return memory_intersects(__init_begin, __init_end, virt, size);
+}
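+
+/*
+ * Illustrative usage sketch, not a definition provided by this header: a
+ * typical use is deciding whether a reference can be dropped together
+ * with init memory, e.g.
+ *
+ *	if (init_section_contains((void *)addr, sizeof(long)))
+ *		return;
+ *
+ * since such an object is discarded after boot anyway.
+ */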
+
+#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h
new file mode 100644
index 000000000..5580eace6
--- /dev/null
+++ b/include/asm-generic/segment.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_SEGMENT_H
+#define __ASM_GENERIC_SEGMENT_H
+/*
+ * Only here because we have some old header files that expect it...
+ *
+ * New architectures probably don't want to have their own version.
+ */
+
+#endif /* __ASM_GENERIC_SEGMENT_H */
diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h
new file mode 100644
index 000000000..ca9f7b6be
--- /dev/null
+++ b/include/asm-generic/serial.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SERIAL_H
+#define __ASM_GENERIC_SERIAL_H
+
+/*
+ * This should not be an architecture specific #define, oh well.
+ *
+ * Traditionally, it just describes i8250 and related serial ports
+ * that have this clock rate.
+ */
+
+#define BASE_BAUD (1843200 / 16)
+
+#endif /* __ASM_GENERIC_SERIAL_H */
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
new file mode 100644
index 000000000..c86abf6bc
--- /dev/null
+++ b/include/asm-generic/set_memory.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SET_MEMORY_H
+#define __ASM_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
+#endif
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
new file mode 100644
index 000000000..c53984fa9
--- /dev/null
+++ b/include/asm-generic/signal.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SIGNAL_H
+#define __ASM_GENERIC_SIGNAL_H
+
+#include <uapi/asm-generic/signal.h>
+
+#ifndef __ASSEMBLY__
+#ifdef SA_RESTORER
+#endif
+
+#include <asm/sigcontext.h>
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_SIGNAL_H */
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644
index 000000000..d0343d58a
--- /dev/null
+++ b/include/asm-generic/simd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return !in_interrupt();
+}
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
new file mode 100644
index 000000000..1dcfad962
--- /dev/null
+++ b/include/asm-generic/sizes.h
@@ -0,0 +1,2 @@
+/* This is a placeholder, to be removed over time */
+#include <linux/sizes.h>
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
new file mode 100644
index 000000000..adaf6acab
--- /dev/null
+++ b/include/asm-generic/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+/*
+ * You need to implement asm/spinlock.h for SMP support. The generic
+ * version does not handle SMP.
+ */
+#ifdef CONFIG_SMP
+#error need an architecture specific asm/spinlock.h
+#endif
+
+#endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
new file mode 100644
index 000000000..f88dcd8ed
--- /dev/null
+++ b/include/asm-generic/statfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _GENERIC_STATFS_H
+#define _GENERIC_STATFS_H
+
+#include <uapi/asm-generic/statfs.h>
+
+typedef __kernel_fsid_t fsid_t;
+#endif
diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h
new file mode 100644
index 000000000..de5e02014
--- /dev/null
+++ b/include/asm-generic/string.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_GENERIC_STRING_H
+#define __ASM_GENERIC_STRING_H
+/*
+ * The kernel provides all required functions in lib/string.c
+ *
+ * Architectures probably want to provide at least their own optimized
+ * memcpy and memset functions though.
+ */
+
+#endif /* __ASM_GENERIC_STRING_H */
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
new file mode 100644
index 000000000..986acc9d3
--- /dev/null
+++ b/include/asm-generic/switch_to.h
@@ -0,0 +1,30 @@
+/* Generic task switch macro wrapper.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_SWITCH_TO_H
+#define __ASM_GENERIC_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+ do { \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#endif /* __ASM_GENERIC_SWITCH_TO_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
new file mode 100644
index 000000000..0c938a435
--- /dev/null
+++ b/include/asm-generic/syscall.h
@@ -0,0 +1,157 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * Note this returns int even on 64-bit machines. Only 32 bits of
+ * system call number can be meaningful. If the actual arch value
+ * is 64 bits, this truncates to 32 bits so 0xffffffff means -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op. The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val. If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5). Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args);
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args);
+
+/**
+ * syscall_get_arch - return the AUDIT_ARCH for the current system call
+ *
+ * Returns the AUDIT_ARCH_* based on the system call convention in use.
+ *
+ * It's only valid to call this when current is stopped on entry to a system
+ * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
+ *
+ * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
+ * provide an implementation of this.
+ */
+int syscall_get_arch(void);
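+
+/*
+ * Illustrative usage sketch, not part of the interface above: a tracing
+ * or audit style consumer, at syscall entry stop, typically does
+ * something like the following (audit_or_filter() is a made-up stand-in
+ * for the real consumer).
+ *
+ *	struct pt_regs *regs = task_pt_regs(current);
+ *	unsigned long args[6];
+ *	int nr = syscall_get_nr(current, regs);
+ *
+ *	syscall_get_arguments(current, regs, 0, 6, args);
+ *	audit_or_filter(nr, syscall_get_arch(), args);
+ */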
+#endif /* _ASM_SYSCALL_H */
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
new file mode 100644
index 000000000..933ca6581
--- /dev/null
+++ b/include/asm-generic/syscalls.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SYSCALLS_H
+#define __ASM_GENERIC_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+/*
+ * Calling conventions for these system calls can differ, so
+ * it's possible to override them.
+ */
+
+#ifndef sys_mmap2
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+#endif
+
+#ifndef sys_mmap
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t pgoff);
+#endif
+
+#ifndef sys_rt_sigreturn
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
+#endif
+
+#endif /* __ASM_GENERIC_SYSCALLS_H */
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
new file mode 100644
index 000000000..59c5a3bd4
--- /dev/null
+++ b/include/asm-generic/termios-base.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* termios.h: generic termios/termio user copying/translation
+ */
+
+#ifndef _ASM_GENERIC_TERMIOS_BASE_H
+#define _ASM_GENERIC_TERMIOS_BASE_H
+
+#include <linux/uaccess.h>
+
+#ifndef __ARCH_TERMIO_GETPUT
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ unsigned short tmp;
+
+ if (get_user(tmp, &termio->c_iflag) < 0)
+ goto fault;
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
+
+ if (get_user(tmp, &termio->c_oflag) < 0)
+ goto fault;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
+
+ if (get_user(tmp, &termio->c_cflag) < 0)
+ goto fault;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
+
+ if (get_user(tmp, &termio->c_lflag) < 0)
+ goto fault;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
+
+ if (get_user(termios->c_line, &termio->c_line) < 0)
+ goto fault;
+
+ if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
+ goto fault;
+
+ return 0;
+
+ fault:
+ return -EFAULT;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
+ put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
+ put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
+ put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
+ put_user(termios->c_line, &termio->c_line) < 0 ||
+ copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifndef user_termios_to_kernel_termios
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#endif
+
+#ifndef kernel_termios_to_user_termios
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#endif
+
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __ARCH_TERMIO_GETPUT */
+
+#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
new file mode 100644
index 000000000..b1398d0d4
--- /dev/null
+++ b/include/asm-generic/termios.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_TERMIOS_H
+#define _ASM_GENERIC_TERMIOS_H
+
+
+#include <linux/uaccess.h>
+#include <uapi/asm-generic/termios.h>
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ * reprint=^R discard=^O werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ const struct termio __user *termio)
+{
+ unsigned short tmp;
+
+ if (get_user(tmp, &termio->c_iflag) < 0)
+ goto fault;
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
+
+ if (get_user(tmp, &termio->c_oflag) < 0)
+ goto fault;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
+
+ if (get_user(tmp, &termio->c_cflag) < 0)
+ goto fault;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
+
+ if (get_user(tmp, &termio->c_lflag) < 0)
+ goto fault;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
+
+ if (get_user(termios->c_line, &termio->c_line) < 0)
+ goto fault;
+
+ if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
+ goto fault;
+
+ return 0;
+
+ fault:
+ return -EFAULT;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
+ put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
+ put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
+ put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
+ put_user(termios->c_line, &termio->c_line) < 0 ||
+ copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifdef TCGETS2
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2));
+}
+
+static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2));
+}
+
+static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+
+static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#else /* TCGETS2 */
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+
+static inline int kernel_termios_to_user_termios(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#endif /* TCGETS2 */
+
+#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h
new file mode 100644
index 000000000..50ba9b5ce
--- /dev/null
+++ b/include/asm-generic/timex.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_TIMEX_H
+#define __ASM_GENERIC_TIMEX_H
+
+/*
+ * If you have a cycle counter, return the value here.
+ */
+typedef unsigned long cycles_t;
+#ifndef get_cycles
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Architectures are encouraged to implement read_current_timer
+ * and define this in order to avoid the expensive delay loop
+ * calibration during boot.
+ */
+#undef ARCH_HAS_READ_CURRENT_TIMER
+
+#endif /* __ASM_GENERIC_TIMEX_H */
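A brief sketch (not part of the patch) of the fallback semantics: on architectures that keep this stub, get_cycles() always returns 0, so cycle deltas computed by generic code simply collapse to zero instead of faulting. The helper name is illustrative.

#include <asm/timex.h>

static cycles_t example_measure_cycles(void (*fn)(void))
{
	cycles_t start = get_cycles();

	fn();

	/* 0 - 0 == 0 on architectures without a cycle counter. */
	return get_cycles() - start;
}
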
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
new file mode 100644
index 000000000..db72ad398
--- /dev/null
+++ b/include/asm-generic/tlb.h
@@ -0,0 +1,317 @@
+/* include/asm-generic/tlb.h
+ *
+ * Generic TLB shootdown code
+ *
+ * Copyright 2001 Red Hat, Inc.
+ * Based on code from mm/memory.c Copyright Linus Torvalds and others.
+ *
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC__TLB_H
+#define _ASM_GENERIC__TLB_H
+
+#include <linux/mmu_notifier.h>
+#include <linux/swap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+/*
+ * Semi RCU freeing of the page directories.
+ *
+ * This is needed by some architectures to implement software pagetable walkers.
+ *
+ * gup_fast() and other software pagetable walkers do a lockless page-table
+ * walk and therefore need some synchronization with the freeing of the page
+ * directories. The chosen means to accomplish that is by disabling IRQs over
+ * the walk.
+ *
+ * Architectures that use IPIs to flush TLBs will then automagically DTRT,
+ * since we unlink the page, flush TLBs, free the page. Since the disabling of
+ * IRQs delays the completion of the TLB flush we can never observe an already
+ * freed page.
+ *
+ * Architectures that do not have this (PPC) need to delay the freeing by some
+ * other means; this is that means.
+ *
+ * What we do is batch the freed directory pages (tables) and RCU free them.
+ * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
+ * holds off grace periods.
+ *
+ * However, in order to batch these pages we need to allocate storage, this
+ * allocation is deep inside the MM code and can thus easily fail on memory
+ * pressure. To guarantee progress we fall back to single table freeing, see
+ * the implementation of tlb_remove_table_one().
+ *
+ */
+struct mmu_table_batch {
+ struct rcu_head rcu;
+ unsigned int nr;
+ void *tables[0];
+};
+
+#define MAX_TABLE_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#endif
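A hedged sketch (not part of the patch): an architecture selecting CONFIG_HAVE_RCU_TABLE_FREE would typically route page-table pages through tlb_remove_table() instead of freeing them directly, so a concurrent gup_fast() walker cannot touch a table before its RCU-sched grace period ends. The wrapper name is illustrative; real architectures wrap their own table type.

#include <asm/tlb.h>

static inline void example_free_pte_table(struct mmu_gather *tlb,
					  pgtable_t pte_page)
{
	/*
	 * Defer the actual free: the table is queued in tlb->batch and
	 * released after an RCU-sched grace period (or via
	 * tlb_remove_table_one() when batch allocation fails).
	 */
	tlb_remove_table(tlb, pte_page);
}
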
+
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define MMU_GATHER_BUNDLE 8
+
+struct mmu_gather_batch {
+ struct mmu_gather_batch *next;
+ unsigned int nr;
+ unsigned int max;
+ struct page *pages[0];
+};
+
+#define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+
+/*
+ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
+ * lockups for non-preemptible kernels on huge machines when a lot of memory
+ * is zapped during unmapping.
+ * 10K pages freed at once should be safe even without a preemption point.
+ */
+#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ struct mmu_table_batch *batch;
+#endif
+ unsigned long start;
+ unsigned long end;
+ /* we are in the middle of an operation to clear
+ * a full mm and can make some optimizations */
+ unsigned int fullmm : 1,
+ /* we have performed an operation which
+ * requires a complete flush of the tlb */
+ need_flush_all : 1;
+
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
+ unsigned int batch_count;
+ int page_size;
+};
+
+#define HAVE_GENERIC_MMU_GATHER
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb,
+ struct mm_struct *mm, unsigned long start, unsigned long end);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force);
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+ int page_size);
+
+static inline void __tlb_adjust_range(struct mmu_gather *tlb,
+ unsigned long address,
+ unsigned int range_size)
+{
+ tlb->start = min(tlb->start, address);
+ tlb->end = max(tlb->end, address + range_size);
+}
+
+static inline void __tlb_reset_range(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm) {
+ tlb->start = tlb->end = ~0;
+ } else {
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+ }
+}
+
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ if (!tlb->end)
+ return;
+
+ tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+ __tlb_reset_range(tlb);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ if (__tlb_remove_page_size(tlb, page, page_size))
+ tlb_flush_mmu(tlb);
+}
+
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+/* tlb_remove_page
+ * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
+ * required.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+}
+
+#ifndef tlb_remove_check_page_size_change
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+ unsigned int page_size)
+{
+ /*
+ * We don't care about page size changes; just update the
+ * mmu_gather page size here so that the debug checks
+ * don't throw a false warning.
+ */
+#ifdef CONFIG_DEBUG_VM
+ tlb->page_size = page_size;
+#endif
+}
+#endif
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+#ifndef tlb_start_vma
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#endif
+
+#define __tlb_end_vma(tlb, vma) \
+ do { \
+ if (!tlb->fullmm) \
+ tlb_flush_mmu_tlbonly(tlb); \
+ } while (0)
+
+#ifndef tlb_end_vma
+#define tlb_end_vma __tlb_end_vma
+#endif
+
+#ifndef __tlb_remove_tlb_entry
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#endif
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that pte's were really unmapped by updating the range,
+ * so we can later optimise away the tlb invalidate. This helps when
+ * userspace is unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
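A rough sketch (not part of the patch) of the usual zap-loop shape that pairs the range tracking above with page freeing; locking and PTE iteration are elided, and the function name is illustrative.

#include <linux/mm.h>
#include <asm/tlb.h>

static void example_zap_one_pte(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				pte_t *ptep, unsigned long addr)
{
	pte_t pte = ptep_get_and_clear_full(vma->vm_mm, addr, ptep,
					    tlb->fullmm);
	struct page *page = pte_page(pte);

	/* Record the unmap so the deferred flush covers this address. */
	tlb_remove_tlb_entry(tlb, ptep, addr);

	/* Queue the page; this may call tlb_flush_mmu() when a batch fills. */
	tlb_remove_page(tlb, page);
}
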
+
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, huge_page_size(h)); \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+/**
+ * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
+ * This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pmd_tlb_entry
+#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
+ } while (0)
+
+/**
+ * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
+ * invalidation. This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pud_tlb_entry
+#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
+ } while (0)
+
+/*
+ * For things like page table caches (i.e. caching addresses "inside" the
+ * page tables, like x86 does), for legacy reasons, flushing an
+ * individual page had better flush the page table caches behind it. This
+ * is definitely how x86 works, for example. And if you have an
+ * architected non-legacy page table cache (which I'm not aware of
+ * anybody actually doing), you're going to have some architecturally
+ * explicit flushing for that, likely *separate* from a regular TLB entry
+ * flush, and thus you'd need more than just some range expansion..
+ *
+ * So if we ever find an architecture
+ * that would want something that odd, I think it is up to that
+ * architecture to do its own odd thing, not cause pain for others
+ * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
+ *
+ * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
+ */
+
+#ifndef pte_free_tlb
+#define pte_free_tlb(tlb, ptep, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __pte_free_tlb(tlb, ptep, address); \
+ } while (0)
+#endif
+
+#ifndef pmd_free_tlb
+#define pmd_free_tlb(tlb, pmdp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __pmd_free_tlb(tlb, pmdp, address); \
+ } while (0)
+#endif
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
+#define pud_free_tlb(tlb, pudp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __pud_free_tlb(tlb, pudp, address); \
+ } while (0)
+#endif
+#endif
+
+#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
+#define p4d_free_tlb(tlb, pudp, address) \
+ do { \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __p4d_free_tlb(tlb, pudp, address); \
+ } while (0)
+#endif
+#endif
+
+#define tlb_migrate_finish(mm) do {} while (0)
+
+#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
new file mode 100644
index 000000000..dc2669289
--- /dev/null
+++ b/include/asm-generic/tlbflush.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_TLBFLUSH_H
+#define __ASM_GENERIC_TLBFLUSH_H
+/*
+ * This is a dummy tlbflush implementation that can be used on all
+ * nommu architectures.
+ * If you have an MMU, you need to write your own functions.
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/tlbflush.h
+#endif
+
+#include <linux/bug.h>
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ BUG();
+}
+
+
+#endif /* __ASM_GENERIC_TLBFLUSH_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
new file mode 100644
index 000000000..5aa8705df
--- /dev/null
+++ b/include/asm-generic/topology.h
@@ -0,0 +1,77 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+#ifndef CONFIG_NUMA
+
+/* Other architectures wishing to use this simple topology API should fill
+ in the below functions as appropriate in their own <asm/topology.h> file. */
+#ifndef cpu_to_node
+#define cpu_to_node(cpu) ((void)(cpu),0)
+#endif
+#ifndef set_numa_node
+#define set_numa_node(node)
+#endif
+#ifndef set_cpu_numa_node
+#define set_cpu_numa_node(cpu, node)
+#endif
+#ifndef cpu_to_mem
+#define cpu_to_mem(cpu) ((void)(cpu),0)
+#endif
+
+#ifndef cpumask_of_node
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
+ #endif
+#endif
+#ifndef pcibus_to_node
+#define pcibus_to_node(bus) ((void)(bus), -1)
+#endif
+
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
+#endif /* CONFIG_NUMA */
+
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES)
+
+#ifndef set_numa_mem
+#define set_numa_mem(node)
+#endif
+#ifndef set_cpu_numa_mem
+#define set_cpu_numa_mem(cpu, node)
+#endif
+
+#endif /* !CONFIG_NUMA || !CONFIG_HAVE_MEMORYLESS_NODES */
+
+#endif /* _ASM_GENERIC_TOPOLOGY_H */
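A small sketch (not part of the patch): because the !CONFIG_NUMA stubs above collapse to node 0 and cpu_online_mask, generic code can use the same calls on NUMA and non-NUMA kernels. The helper name is illustrative.

#include <linux/slab.h>
#include <linux/topology.h>

static void *example_alloc_near_cpu(size_t size, int cpu)
{
	/* On !CONFIG_NUMA this is simply a kzalloc() on node 0. */
	return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}
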
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 000000000..cbbca2959
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c.
+ * None are added if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
new file mode 100644
index 000000000..6b2e63df2
--- /dev/null
+++ b/include/asm-generic/uaccess.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UACCESS_H
+#define __ASM_GENERIC_UACCESS_H
+
+/*
+ * User space memory access functions; these should work
+ * on any machine that has kernel and user data in the same
+ * address space, e.g. all NOMMU machines.
+ */
+#include <linux/string.h>
+
+#include <asm/segment.h>
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#ifndef KERNEL_DS
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#endif
+
+#ifndef USER_DS
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#ifndef get_fs
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+}
+#endif
+
+#ifndef segment_eq
+#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
+
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+
+/*
+ * The architecture should really override this if possible, at least
+ * doing a check on the get_fs()
+ */
+#ifndef __access_ok
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ return 1;
+}
+#endif
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ * This version just falls back to copy_{from,to}_user, which should
+ * provide a fast-path for small values.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \
+ __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
+ -EFAULT; \
+})
+
+#ifndef __put_user_fn
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+ return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+}
+
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
+extern int __put_user_bad(void) __attribute__((noreturn));
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long long __x = 0; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ const void __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
+})
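A minimal sketch (not part of the patch) of the intended calling pattern for the accessors above, e.g. from an ioctl-style handler; the function is illustrative.

#include <linux/uaccess.h>

static int example_double_user_value(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
		return -EFAULT;

	return put_user(val * 2, uptr);	/* 0 on success, -EFAULT otherwise */
}
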
+
+#ifndef __get_user_fn
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+}
+
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
+extern int __get_user_bad(void) __attribute__((noreturn));
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+#ifndef __strncpy_from_user
+static inline long
+__strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ char *tmp;
+ strncpy(dst, (const char __force *)src, count);
+ for (tmp = dst; *tmp && count > 0; tmp++, count--)
+ ;
+ return (tmp - dst);
+}
+#endif
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+#ifndef __strnlen_user
+#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
+#endif
+
+/*
+ * Unlike strnlen, strnlen_user includes the nul terminator in
+ * its returned count. Callers should check for a returned value
+ * greater than N as an indication the string is too long.
+ */
+static inline long strnlen_user(const char __user *src, long n)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return 0;
+ return __strnlen_user(src, n);
+}
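A short sketch (not part of the patch) showing how the two string helpers are meant to be combined; the 32-byte limit and the function name are illustrative.

#include <linux/errno.h>
#include <linux/uaccess.h>

static long example_copy_user_name(char *dst, const char __user *src)
{
	long len = strnlen_user(src, 32);

	if (len == 0)
		return -EFAULT;		/* faulted while probing the string */
	if (len > 32)
		return -ENAMETOOLONG;	/* no NUL within the limit */

	return strncpy_from_user(dst, src, len);
}
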
+
+/*
+ * Zero Userspace
+ */
+#ifndef __clear_user
+static inline __must_check unsigned long
+__clear_user(void __user *to, unsigned long n)
+{
+ memset((void __force *)to, 0, n);
+ return 0;
+}
+#endif
+
+static inline __must_check unsigned long
+clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ if (!access_ok(VERIFY_WRITE, to, n))
+ return n;
+
+ return __clear_user(to, n);
+}
+
+#include <asm/extable.h>
+
+#endif /* __ASM_GENERIC_UACCESS_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 000000000..374c940e9
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_UNALIGNED_H
+#define __ASM_GENERIC_UNALIGNED_H
+
+/*
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
+ */
+#include <asm/byteorder.h>
+
+/* Set by the arch if it can handle unaligned accesses in hardware. */
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/access_ok.h>
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# endif
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# endif
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_GENERIC_UNALIGNED_H */
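A brief sketch (not part of the patch): reading a field that is not naturally aligned in a wire-format buffer with the helpers selected above; the structure layout is purely illustrative.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/unaligned.h>

struct example_wire_hdr {
	u8  type;
	u32 length;		/* not naturally aligned in the byte stream */
} __packed;

static inline u32 example_wire_hdr_length(const void *buf)
{
	const struct example_wire_hdr *hdr = buf;

	/* Safe on all architectures, efficient where hardware allows it. */
	return get_unaligned(&hdr->length);
}
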
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
new file mode 100644
index 000000000..cdf904265
--- /dev/null
+++ b/include/asm-generic/unistd.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <uapi/asm-generic/unistd.h>
+#include <linux/export.h>
+
+/*
+ * These are required system calls; we should
+ * invert the logic eventually and let them
+ * be selected by default.
+ */
+#if __BITS_PER_LONG == 32
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_LLSEEK
+#endif
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
new file mode 100644
index 000000000..35638c347
--- /dev/null
+++ b/include/asm-generic/user.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_USER_H
+#define __ASM_GENERIC_USER_H
+/*
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
+ */
+
+#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
new file mode 100644
index 000000000..adf91a783
--- /dev/null
+++ b/include/asm-generic/vga.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+#ifndef __ASM_GENERIC_VGA_H
+#define __ASM_GENERIC_VGA_H
+
+/*
+ * On most architectures that support VGA, we can just
+ * recalculate addresses and then access the videoram
+ * directly without any black magic.
+ *
+ * Everyone else needs to ioremap the address and use
+ * proper I/O accesses.
+ */
+#ifndef VGA_MAP_MEM
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
new file mode 100644
index 000000000..a26e6f503
--- /dev/null
+++ b/include/asm-generic/vmlinux.lds.h
@@ -0,0 +1,1001 @@
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample; architectures may have special requirements]
+ *
+ * OUTPUT_FORMAT(...)
+ * OUTPUT_ARCH(...)
+ * ENTRY(...)
+ * SECTIONS
+ * {
+ * . = START;
+ * __init_begin = .;
+ * HEAD_TEXT_SECTION
+ * INIT_TEXT_SECTION(PAGE_SIZE)
+ * INIT_DATA_SECTION(...)
+ * PERCPU_SECTION(CACHELINE_SIZE)
+ * __init_end = .;
+ *
+ * _stext = .;
+ * TEXT_SECTION = 0
+ * _etext = .;
+ *
+ * _sdata = .;
+ * RO_DATA_SECTION(PAGE_SIZE)
+ * RW_DATA_SECTION(...)
+ * _edata = .;
+ *
+ * EXCEPTION_TABLE(...)
+ * NOTES
+ *
+ * BSS_SECTION(0, 0, 0)
+ * _end = .;
+ *
+ * STABS_DEBUG
+ * DWARF_DEBUG
+ *
+ * DISCARDS // must be the last
+ * }
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ * // __init_begin and __init_end should be page aligned, so that we can
+ * // free the whole .init memory
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ * [__nosave_begin, __nosave_end] for the nosave data
+ */
+
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(8)
+
+/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ *
+ * RODATA_MAIN is not used because existing code already defines .rodata.x
+ * sections to be brought in with rodata.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
+#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
+#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define SDATA_MAIN .sdata
+#define RODATA_MAIN .rodata
+#define BSS_MAIN .bss
+#define SBSS_MAIN .sbss
+#endif
+
+/*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+ */
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or can be discarded (which
+ * often happens at runtime).
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC() . = ALIGN(8); \
+ __start_mcount_loc = .; \
+ KEEP(*(__mcount_loc)) \
+ __stop_mcount_loc = .;
+#else
+#define MCOUNT_REC()
+#endif
+
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
+ KEEP(*(_ftrace_annotated_branch)) \
+ __stop_annotated_branch_profile = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE() __start_branch_profile = .; \
+ KEEP(*(_ftrace_branch)) \
+ __stop_branch_profile = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
+#ifdef CONFIG_KPROBES
+#define KPROBE_BLACKLIST() . = ALIGN(8); \
+ __start_kprobe_blacklist = .; \
+ KEEP(*(_kprobe_blacklist)) \
+ __stop_kprobe_blacklist = .;
+#else
+#define KPROBE_BLACKLIST()
+#endif
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
+ __start_error_injection_whitelist = .; \
+ KEEP(*(_error_injection_whitelist)) \
+ __stop_error_injection_whitelist = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
+#ifdef CONFIG_EVENT_TRACING
+#define FTRACE_EVENTS() . = ALIGN(8); \
+ __start_ftrace_events = .; \
+ KEEP(*(_ftrace_events)) \
+ __stop_ftrace_events = .; \
+ __start_ftrace_eval_maps = .; \
+ KEEP(*(_ftrace_eval_map)) \
+ __stop_ftrace_eval_maps = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
+ KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
+ __stop___trace_bprintk_fmt = .;
+#define TRACEPOINT_STR() __start___tracepoint_str = .; \
+ KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
+ __stop___tracepoint_str = .;
+#else
+#define TRACE_PRINTKS()
+#define TRACEPOINT_STR()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() . = ALIGN(8); \
+ __start_syscalls_metadata = .; \
+ KEEP(*(__syscalls_metadata)) \
+ __stop_syscalls_metadata = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
+#ifdef CONFIG_BPF_EVENTS
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ __start__bpf_raw_tp = .; \
+ KEEP(*(__bpf_raw_tp_map)) \
+ __stop__bpf_raw_tp = .;
+#else
+#define BPF_RAW_TP()
+#endif
+
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() . = ALIGN(8); \
+ __earlycon_table = .; \
+ KEEP(*(__earlycon_table)) \
+ __earlycon_table_end = .;
+#else
+#define EARLYCON_TABLE()
+#endif
+
+#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
+#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
+#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
+#define _OF_TABLE_0(name)
+#define _OF_TABLE_1(name) \
+ . = ALIGN(8); \
+ __##name##_of_table = .; \
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
+
+#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
+#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
+#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
+#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
+
+#ifdef CONFIG_ACPI
+#define ACPI_PROBE_TABLE(name) \
+ . = ALIGN(8); \
+ __##name##_acpi_probe_table = .; \
+ KEEP(*(__##name##_acpi_probe_table)) \
+ __##name##_acpi_probe_table_end = .;
+#else
+#define ACPI_PROBE_TABLE(name)
+#endif
+
+#define KERNEL_DTB() \
+ STRUCT_ALIGN(); \
+ __dtb_start = .; \
+ KEEP(*(.dtb.init.rodata)) \
+ __dtb_end = .;
+
+/*
+ * .data section
+ */
+#define DATA_DATA \
+ *(.xiptext) \
+ *(DATA_MAIN) \
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data*) \
+ MEM_KEEP(exit.data*) \
+ *(.data.unlikely) \
+ __start_once = .; \
+ *(.data.once) \
+ __end_once = .; \
+ STRUCT_ALIGN(); \
+ *(__tracepoints) \
+ /* implement dynamic printk debug */ \
+ . = ALIGN(8); \
+ __start___jump_table = .; \
+ KEEP(*(__jump_table)) \
+ __stop___jump_table = .; \
+ . = ALIGN(8); \
+ __start___verbose = .; \
+ KEEP(*(__verbose)) \
+ __stop___verbose = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE() \
+ TRACE_PRINTKS() \
+ BPF_RAW_TP() \
+ TRACEPOINT_STR()
+
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA \
+ . = ALIGN(PAGE_SIZE); \
+ __nosave_begin = .; \
+ *(.data..nosave) \
+ . = ALIGN(PAGE_SIZE); \
+ __nosave_end = .;
+
+#define PAGE_ALIGNED_DATA(page_align) \
+ . = ALIGN(page_align); \
+ *(.data..page_aligned) \
+ . = ALIGN(page_align);
+
+#define READ_MOSTLY_DATA(align) \
+ . = ALIGN(align); \
+ *(.data..read_mostly) \
+ . = ALIGN(align);
+
+#define CACHELINE_ALIGNED_DATA(align) \
+ . = ALIGN(align); \
+ *(.data..cacheline_aligned)
+
+#define INIT_TASK_DATA(align) \
+ . = ALIGN(align); \
+ __start_init_task = .; \
+ init_thread_union = .; \
+ init_stack = .; \
+ KEEP(*(.data..init_task)) \
+ KEEP(*(.data..init_thread_info)) \
+ . = __start_init_task + THREAD_SIZE; \
+ __end_init_task = .;
+
+/*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
+ __start_ro_after_init = .; \
+ *(.data..ro_after_init) \
+ __end_ro_after_init = .;
+#endif
+
+/*
+ * Read only Data
+ */
+#define RO_DATA_SECTION(align) \
+ . = ALIGN((align)); \
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ __start_rodata = .; \
+ *(.rodata) *(.rodata.*) \
+ RO_AFTER_INIT_DATA /* Read only after init */ \
+ KEEP(*(__vermagic)) /* Kernel version magic */ \
+ . = ALIGN(8); \
+ __start___tracepoints_ptrs = .; \
+ KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
+ __stop___tracepoints_ptrs = .; \
+ *(__tracepoints_strings)/* Tracepoints: strings */ \
+ } \
+ \
+ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
+ *(.rodata1) \
+ } \
+ \
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ __start_pci_fixups_early = .; \
+ KEEP(*(.pci_fixup_early)) \
+ __end_pci_fixups_early = .; \
+ __start_pci_fixups_header = .; \
+ KEEP(*(.pci_fixup_header)) \
+ __end_pci_fixups_header = .; \
+ __start_pci_fixups_final = .; \
+ KEEP(*(.pci_fixup_final)) \
+ __end_pci_fixups_final = .; \
+ __start_pci_fixups_enable = .; \
+ KEEP(*(.pci_fixup_enable)) \
+ __end_pci_fixups_enable = .; \
+ __start_pci_fixups_resume = .; \
+ KEEP(*(.pci_fixup_resume)) \
+ __end_pci_fixups_resume = .; \
+ __start_pci_fixups_resume_early = .; \
+ KEEP(*(.pci_fixup_resume_early)) \
+ __end_pci_fixups_resume_early = .; \
+ __start_pci_fixups_suspend = .; \
+ KEEP(*(.pci_fixup_suspend)) \
+ __end_pci_fixups_suspend = .; \
+ __start_pci_fixups_suspend_late = .; \
+ KEEP(*(.pci_fixup_suspend_late)) \
+ __end_pci_fixups_suspend_late = .; \
+ } \
+ \
+ /* Built-in firmware blobs */ \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ __start_builtin_fw = .; \
+ KEEP(*(.builtin_fw)) \
+ __end_builtin_fw = .; \
+ } \
+ \
+ TRACEDATA \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ __start___ksymtab = .; \
+ KEEP(*(SORT(___ksymtab+*))) \
+ __stop___ksymtab = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ __start___ksymtab_gpl = .; \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
+ __stop___ksymtab_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
+ __start___ksymtab_unused = .; \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
+ __stop___ksymtab_unused = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+ __start___ksymtab_unused_gpl = .; \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
+ __stop___ksymtab_unused_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
+ __start___ksymtab_gpl_future = .; \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
+ __stop___ksymtab_gpl_future = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ __start___kcrctab = .; \
+ KEEP(*(SORT(___kcrctab+*))) \
+ __stop___kcrctab = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ __start___kcrctab_gpl = .; \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
+ __stop___kcrctab_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
+ __start___kcrctab_unused = .; \
+ KEEP(*(SORT(___kcrctab_unused+*))) \
+ __stop___kcrctab_unused = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+ __start___kcrctab_unused_gpl = .; \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
+ __stop___kcrctab_unused_gpl = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
+ __start___kcrctab_gpl_future = .; \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
+ __stop___kcrctab_gpl_future = .; \
+ } \
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+ *(__ksymtab_strings) \
+ } \
+ \
+ /* __*init sections */ \
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ MEM_KEEP(init.rodata) \
+ MEM_KEEP(exit.rodata) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ __start___param = .; \
+ KEEP(*(__param)) \
+ __stop___param = .; \
+ } \
+ \
+ /* Built-in module versions. */ \
+ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+ __start___modver = .; \
+ KEEP(*(__modver)) \
+ __stop___modver = .; \
+ . = ALIGN((align)); \
+ __end_rodata = .; \
+ } \
+ . = ALIGN((align));
+
+/* RODATA & RO_DATA provided for backward compatibility.
+ * All archs are supposed to use RO_DATA() */
+#define RODATA RO_DATA_SECTION(4096)
+#define RO_DATA(align) RO_DATA_SECTION(align)
+
+#define SECURITY_INIT \
+ .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
+ __security_initcall_start = .; \
+ KEEP(*(.security_initcall.init)) \
+ __security_initcall_end = .; \
+ }
+
+/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT \
+ ALIGN_FUNCTION(); \
+ __noinstr_text_start = .; \
+ *(.noinstr.text) \
+ __noinstr_text_end = .;
+
+/*
+ * .text section. Map to function alignment to avoid address changes
+ * during the second ld pass when generating System.map.
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
+#define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+ *(.text.hot .text.hot.*) \
+ *(TEXT_MAIN .text.fixup) \
+ *(.text.unlikely .text.unlikely.*) \
+ *(.text.unknown .text.unknown.*) \
+ NOINSTR_TEXT \
+ *(.text..refcount) \
+ *(.ref.text) \
+ *(.text.asan.* .text.tsan.*) \
+ MEM_KEEP(init.text*) \
+ MEM_KEEP(exit.text*) \
+
+
+/* sched.text is aligned to the function alignment to ensure we have the same
+ * address even at the second ld pass when generating System.map */
+#define SCHED_TEXT \
+ ALIGN_FUNCTION(); \
+ __sched_text_start = .; \
+ *(.sched.text) \
+ __sched_text_end = .;
+
+/* spinlock.text is aligned to the function alignment to ensure we have the same
+ * address even at the second ld pass when generating System.map */
+#define LOCK_TEXT \
+ ALIGN_FUNCTION(); \
+ __lock_text_start = .; \
+ *(.spinlock.text) \
+ __lock_text_end = .;
+
+#define CPUIDLE_TEXT \
+ ALIGN_FUNCTION(); \
+ __cpuidle_text_start = .; \
+ *(.cpuidle.text) \
+ __cpuidle_text_end = .;
+
+#define KPROBES_TEXT \
+ ALIGN_FUNCTION(); \
+ __kprobes_text_start = .; \
+ *(.kprobes.text) \
+ __kprobes_text_end = .;
+
+#define ENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .;
+
+#define IRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __irqentry_text_start = .; \
+ *(.irqentry.text) \
+ __irqentry_text_end = .;
+
+#define SOFTIRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ __softirqentry_text_start = .; \
+ *(.softirqentry.text) \
+ __softirqentry_text_end = .;
+
+/* Section used for early init (in .S files) */
+#define HEAD_TEXT KEEP(*(.head.text))
+
+#define HEAD_TEXT_SECTION \
+ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
+ HEAD_TEXT \
+ }
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align) \
+ . = ALIGN(align); \
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
+ __start___ex_table = .; \
+ KEEP(*(__ex_table)) \
+ __stop___ex_table = .; \
+ }
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA_SECTION(align) \
+ . = ALIGN(align); \
+ .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
+ INIT_TASK_DATA(align) \
+ }
+
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS() . = ALIGN(8); \
+ __ctors_start = .; \
+ KEEP(*(.ctors)) \
+ KEEP(*(SORT(.init_array.*))) \
+ KEEP(*(.init_array)) \
+ __ctors_end = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
+/* init and exit section handling */
+#define INIT_DATA \
+ KEEP(*(SORT(___kentry+*))) \
+ *(.init.data init.data.*) \
+ MEM_DISCARD(init.data*) \
+ KERNEL_CTORS() \
+ MCOUNT_REC() \
+ *(.init.rodata .init.rodata.*) \
+ FTRACE_EVENTS() \
+ TRACE_SYSCALLS() \
+ KPROBE_BLACKLIST() \
+ ERROR_INJECT_WHITELIST() \
+ MEM_DISCARD(init.rodata) \
+ CLK_OF_TABLES() \
+ RESERVEDMEM_OF_TABLES() \
+ TIMER_OF_TABLES() \
+ CPU_METHOD_OF_TABLES() \
+ CPUIDLE_METHOD_OF_TABLES() \
+ KERNEL_DTB() \
+ IRQCHIP_OF_MATCH_TABLE() \
+ ACPI_PROBE_TABLE(irqchip) \
+ ACPI_PROBE_TABLE(timer) \
+ EARLYCON_TABLE()
+
+#define INIT_TEXT \
+ *(.init.text .init.text.*) \
+ *(.text.startup) \
+ MEM_DISCARD(init.text*)
+
+#define EXIT_DATA \
+ *(.exit.data .exit.data.*) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
+ MEM_DISCARD(exit.data*) \
+ MEM_DISCARD(exit.rodata*)
+
+#define EXIT_TEXT \
+ *(.exit.text) \
+ *(.text.exit) \
+ MEM_DISCARD(exit.text)
+
+#define EXIT_CALL \
+ *(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS(sbss_align) \
+ . = ALIGN(sbss_align); \
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+ *(.dynsbss) \
+ *(SBSS_MAIN) \
+ *(.scommon) \
+ }
+
+/*
+ * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
+ * sections to the front of bss.
+ */
+#ifndef BSS_FIRST_SECTIONS
+#define BSS_FIRST_SECTIONS
+#endif
+
+#define BSS(bss_align) \
+ . = ALIGN(bss_align); \
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+ BSS_FIRST_SECTIONS \
+ . = ALIGN(PAGE_SIZE); \
+ *(.bss..page_aligned) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.dynbss) \
+ *(BSS_MAIN) \
+ *(COMMON) \
+ }
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
+#define DWARF_DEBUG \
+ /* DWARF 1 */ \
+ .debug 0 : { *(.debug) } \
+ .line 0 : { *(.line) } \
+ /* GNU DWARF 1 extensions */ \
+ .debug_srcinfo 0 : { *(.debug_srcinfo) } \
+ .debug_sfnames 0 : { *(.debug_sfnames) } \
+ /* DWARF 1.1 and DWARF 2 */ \
+ .debug_aranges 0 : { *(.debug_aranges) } \
+ .debug_pubnames 0 : { *(.debug_pubnames) } \
+ /* DWARF 2 */ \
+ .debug_info 0 : { *(.debug_info \
+ .gnu.linkonce.wi.*) } \
+ .debug_abbrev 0 : { *(.debug_abbrev) } \
+ .debug_line 0 : { *(.debug_line) } \
+ .debug_frame 0 : { *(.debug_frame) } \
+ .debug_str 0 : { *(.debug_str) } \
+ .debug_loc 0 : { *(.debug_loc) } \
+ .debug_macinfo 0 : { *(.debug_macinfo) } \
+ .debug_pubtypes 0 : { *(.debug_pubtypes) } \
+ /* DWARF 3 */ \
+ .debug_ranges 0 : { *(.debug_ranges) } \
+ /* SGI/MIPS DWARF 2 extensions */ \
+ .debug_weaknames 0 : { *(.debug_weaknames) } \
+ .debug_funcnames 0 : { *(.debug_funcnames) } \
+ .debug_typenames 0 : { *(.debug_typenames) } \
+ .debug_varnames 0 : { *(.debug_varnames) } \
+ /* GNU DWARF 2 extensions */ \
+ .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
+ .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
+ /* DWARF 4 */ \
+ .debug_types 0 : { *(.debug_types) } \
+ /* DWARF 5 */ \
+ .debug_addr 0 : { *(.debug_addr) } \
+ .debug_line_str 0 : { *(.debug_line_str) } \
+ .debug_loclists 0 : { *(.debug_loclists) } \
+ .debug_macro 0 : { *(.debug_macro) } \
+ .debug_names 0 : { *(.debug_names) } \
+ .debug_rnglists 0 : { *(.debug_rnglists) } \
+ .debug_str_offsets 0 : { *(.debug_str_offsets) }
+
+ /* Stabs debugging sections. */
+#define STABS_DEBUG \
+ .stab 0 : { *(.stab) } \
+ .stabstr 0 : { *(.stabstr) } \
+ .stab.excl 0 : { *(.stab.excl) } \
+ .stab.exclstr 0 : { *(.stab.exclstr) } \
+ .stab.index 0 : { *(.stab.index) } \
+ .stab.indexstr 0 : { *(.stab.indexstr) } \
+ .comment 0 : { *(.comment) }
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUG_TABLE \
+ . = ALIGN(8); \
+ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
+ __start___bug_table = .; \
+ KEEP(*(__bug_table)) \
+ __stop___bug_table = .; \
+ }
+#else
+#define BUG_TABLE
+#endif
+
+#ifdef CONFIG_UNWINDER_ORC
+#define ORC_UNWIND_TABLE \
+ . = ALIGN(4); \
+ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
+ __start_orc_unwind_ip = .; \
+ KEEP(*(.orc_unwind_ip)) \
+ __stop_orc_unwind_ip = .; \
+ } \
+ . = ALIGN(2); \
+ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
+ __start_orc_unwind = .; \
+ KEEP(*(.orc_unwind)) \
+ __stop_orc_unwind = .; \
+ } \
+ . = ALIGN(4); \
+ .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
+ orc_lookup = .; \
+ . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ LOOKUP_BLOCK_SIZE) + 1) * 4; \
+ orc_lookup_end = .; \
+ }
+#else
+#define ORC_UNWIND_TABLE
+#endif
+
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA \
+ . = ALIGN(4); \
+ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
+ __tracedata_start = .; \
+ KEEP(*(.tracedata)) \
+ __tracedata_end = .; \
+ }
+#else
+#define TRACEDATA
+#endif
+
+#define NOTES \
+ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
+ __start_notes = .; \
+ KEEP(*(.note.*)) \
+ __stop_notes = .; \
+ }
+
+#define INIT_SETUP(initsetup_align) \
+ . = ALIGN(initsetup_align); \
+ __setup_start = .; \
+ KEEP(*(.init.setup)) \
+ __setup_end = .;
+
+#define INIT_CALLS_LEVEL(level) \
+ __initcall##level##_start = .; \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
+
+#define INIT_CALLS \
+ __initcall_start = .; \
+ KEEP(*(.initcallearly.init)) \
+ INIT_CALLS_LEVEL(0) \
+ INIT_CALLS_LEVEL(1) \
+ INIT_CALLS_LEVEL(2) \
+ INIT_CALLS_LEVEL(3) \
+ INIT_CALLS_LEVEL(4) \
+ INIT_CALLS_LEVEL(5) \
+ INIT_CALLS_LEVEL(rootfs) \
+ INIT_CALLS_LEVEL(6) \
+ INIT_CALLS_LEVEL(7) \
+ __initcall_end = .;
+
+#define CON_INITCALL \
+ __con_initcall_start = .; \
+ KEEP(*(.con_initcall.init)) \
+ __con_initcall_end = .;
+
+#define SECURITY_INITCALL \
+ __security_initcall_start = .; \
+ KEEP(*(.security_initcall.init)) \
+ __security_initcall_end = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS \
+ . = ALIGN(4); \
+ __initramfs_start = .; \
+ KEEP(*(.init.ramfs)) \
+ . = ALIGN(8); \
+ KEEP(*(.init.ramfs.info))
+#else
+#define INIT_RAM_FS
+#endif
+
+/*
+ * Memory encryption operates on a page basis. Since we need to clear
+ * the memory encryption mask for this section, it needs to be aligned
+ * on a page boundary and be a page-size multiple in length.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define PERCPU_DECRYPTED_SECTION \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..decrypted) \
+ *(.data..percpu..decrypted) \
+ . = ALIGN(PAGE_SIZE);
+#else
+#define PERCPU_DECRYPTED_SECTION
+#endif
+
+
+/*
+ * Default discarded sections.
+ *
+ * Some archs want to discard exit text/data at runtime rather than
+ * link time due to cross-section references such as alt instructions,
+ * bug table, eh_frame, etc. DISCARDS must be the last of output
+ * section definitions so that such archs put those in earlier section
+ * definitions.
+ */
+#define DISCARDS \
+ /DISCARD/ : { \
+ EXIT_TEXT \
+ EXIT_DATA \
+ EXIT_CALL \
+ *(.discard) \
+ *(.discard.*) \
+ }
+
+/**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline) \
+ __per_cpu_start = .; \
+ *(.data..percpu..first) \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..page_aligned) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu..read_mostly) \
+ . = ALIGN(cacheline); \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
+ PERCPU_DECRYPTED_SECTION \
+ __per_cpu_end = .;
+
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies an explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank. Be warned that
+ * output PHDR is sticky. If @phdr is specified, the next output
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU_SECTION.
+ */
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
+ __per_cpu_load = .; \
+ .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
+ PERCPU_INPUT(cacheline) \
+ } phdr \
+ . = __per_cpu_load + SIZEOF(.data..percpu);
+
+/**
+ * PERCPU_SECTION - define output section for percpu area, simple version
+ * @cacheline: cacheline size
+ *
+ * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
+ * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
+ */
+#define PERCPU_SECTION(cacheline) \
+ . = ALIGN(PAGE_SIZE); \
+ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
+ __per_cpu_load = .; \
+ PERCPU_INPUT(cacheline) \
+ }
+
+
+/*
+ * Definition of the high-level *_SECTION macros.
+ * They will fit only a subset of the architectures.
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically/always less than a PAGE_SIZE, so
+ * the sections that have this restriction (or a similar one)
+ * are located before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * use 0 as page_align if page_aligned data is not used */
+#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
+ . = ALIGN(PAGE_SIZE); \
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { \
+ INIT_TASK_DATA(inittask) \
+ NOSAVE_DATA \
+ PAGE_ALIGNED_DATA(pagealigned) \
+ CACHELINE_ALIGNED_DATA(cacheline) \
+ READ_MOSTLY_DATA(cacheline) \
+ DATA_DATA \
+ CONSTRUCTORS \
+ } \
+ BUG_TABLE \
+
+#define INIT_TEXT_SECTION(inittext_align) \
+ . = ALIGN(inittext_align); \
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
+ _sinittext = .; \
+ INIT_TEXT \
+ _einittext = .; \
+ }
+
+#define INIT_DATA_SECTION(initsetup_align) \
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
+ INIT_DATA \
+ INIT_SETUP(initsetup_align) \
+ INIT_CALLS \
+ CON_INITCALL \
+ SECURITY_INITCALL \
+ INIT_RAM_FS \
+ }
+
+#define BSS_SECTION(sbss_align, bss_align, stop_align) \
+ . = ALIGN(sbss_align); \
+ __bss_start = .; \
+ SBSS(sbss_align) \
+ BSS(bss_align) \
+ . = ALIGN(stop_align); \
+ __bss_stop = .;
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
new file mode 100644
index 000000000..b1a49677f
--- /dev/null
+++ b/include/asm-generic/vtime.h
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 000000000..20c93f08c
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
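+/*
+ * Binary-search the mask for the byte containing the zero.  On a
+ * big-endian machine byte 0 is the most significant byte, so the value
+ * returned here is the in-memory offset of the first zero byte.
+ */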
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
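+
+/*
+ * Worked example: a bytemask of 0x000000000000ffff (first zero in byte 2)
+ * gives 0xffff * 0x0001020304050608 >> 56 == 2.
+ */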
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
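+
+/*
+ * Worked example: a bytemask of 0x00ffff (first zero in byte 2) gives
+ * a == (0x0ff0001 + 0x00ffff) >> 23 == 2, and 2 & 0x00ffff == 2.
+ */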
+
+#endif
+
+/* Return nonzero if the word contains a zero byte */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
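+/*
+ * Rough sketch of the usual calling pattern (compare the strlen()
+ * implementation in lib/string.c).  example_strlen() is a hypothetical
+ * name, and src is assumed to be word-aligned and NUL-terminated within
+ * readable memory:
+ *
+ *        static inline size_t example_strlen(const char *src)
+ *        {
+ *                const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ *                unsigned long word, mask;
+ *                size_t res;
+ *
+ *                for (res = 0; ; res += sizeof(word)) {
+ *                        word = *(unsigned long *)(src + res);
+ *                        if (has_zero(word, &mask, &constants)) {
+ *                                mask = prep_zero_mask(word, mask, &constants);
+ *                                mask = create_zero_mask(mask);
+ *                                return res + find_zero(mask);
+ *                        }
+ *                }
+ *        }
+ */
+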
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
new file mode 100644
index 000000000..b4d843225
--- /dev/null
+++ b/include/asm-generic/xor.h
@@ -0,0 +1,718 @@
+/*
+ * include/asm-generic/xor.h
+ *
+ * Generic optimized RAID-5 checksumming functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/prefetch.h>
+
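+/*
+ * The "8regs" variants below XOR the source buffers into p1 eight longs
+ * at a time.  The byte count is assumed to be a non-zero multiple of
+ * 8 * sizeof(long); callers such as the RAID code pass whole pages here.
+ */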
+static void
+xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
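+/*
+ * The "32regs" variants load a whole stripe of eight longs into local
+ * variables before XOR-ing and storing them back, giving the compiler a
+ * chance to schedule the loads and stores in bursts on CPUs with plenty
+ * of registers.
+ */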
+static void
+xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
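+/*
+ * The "_p" variants prefetch the next stripe of every buffer before
+ * processing the current one.  The loop count is reduced by one and the
+ * last stripe is reached through the once_more label, so no prefetch is
+ * issued beyond the end of the buffers.
+ */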
+static void
+xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static struct xor_block_template xor_block_8regs = {
+ .name = "8regs",
+ .do_2 = xor_8regs_2,
+ .do_3 = xor_8regs_3,
+ .do_4 = xor_8regs_4,
+ .do_5 = xor_8regs_5,
+};
+
+static struct xor_block_template xor_block_32regs = {
+ .name = "32regs",
+ .do_2 = xor_32regs_2,
+ .do_3 = xor_32regs_3,
+ .do_4 = xor_32regs_4,
+ .do_5 = xor_32regs_5,
+};
+
+static struct xor_block_template xor_block_8regs_p __maybe_unused = {
+ .name = "8regs_prefetch",
+ .do_2 = xor_8regs_p_2,
+ .do_3 = xor_8regs_p_3,
+ .do_4 = xor_8regs_p_4,
+ .do_5 = xor_8regs_p_5,
+};
+
+static struct xor_block_template xor_block_32regs_p __maybe_unused = {
+ .name = "32regs_prefetch",
+ .do_2 = xor_32regs_p_2,
+ .do_3 = xor_32regs_p_3,
+ .do_4 = xor_32regs_p_4,
+ .do_5 = xor_32regs_p_5,
+};
+
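+/*
+ * XOR_TRY_TEMPLATES is expanded by the calibration code in crypto/xor.c,
+ * which benchmarks each template with xor_speed() and selects the
+ * fastest one for xor_blocks().
+ */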
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ } while (0)