author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /arch/ia64/include
parent     Initial commit. (diff)
Adding upstream version 5.10.209. (tags: upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/Kbuild | 5
-rw-r--r--  arch/ia64/include/asm/acenv.h | 49
-rw-r--r--  arch/ia64/include/asm/acpi-ext.h | 17
-rw-r--r--  arch/ia64/include/asm/acpi.h | 110
-rw-r--r--  arch/ia64/include/asm/agp.h | 27
-rw-r--r--  arch/ia64/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/ia64/include/asm/asm-prototypes.h | 30
-rw-r--r--  arch/ia64/include/asm/asmmacro.h | 136
-rw-r--r--  arch/ia64/include/asm/atomic.h | 223
-rw-r--r--  arch/ia64/include/asm/barrier.h | 79
-rw-r--r--  arch/ia64/include/asm/bitops.h | 456
-rw-r--r--  arch/ia64/include/asm/bug.h | 19
-rw-r--r--  arch/ia64/include/asm/cache.h | 30
-rw-r--r--  arch/ia64/include/asm/cacheflush.h | 33
-rw-r--r--  arch/ia64/include/asm/checksum.h | 63
-rw-r--r--  arch/ia64/include/asm/clocksource.h | 11
-rw-r--r--  arch/ia64/include/asm/cpu.h | 23
-rw-r--r--  arch/ia64/include/asm/cputime.h | 21
-rw-r--r--  arch/ia64/include/asm/current.h | 18
-rw-r--r--  arch/ia64/include/asm/cyclone.h | 16
-rw-r--r--  arch/ia64/include/asm/delay.h | 89
-rw-r--r--  arch/ia64/include/asm/device.h | 14
-rw-r--r--  arch/ia64/include/asm/div64.h | 1
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 16
-rw-r--r--  arch/ia64/include/asm/dma.h | 19
-rw-r--r--  arch/ia64/include/asm/dmi.h | 15
-rw-r--r--  arch/ia64/include/asm/early_ioremap.h | 11
-rw-r--r--  arch/ia64/include/asm/elf.h | 233
-rw-r--r--  arch/ia64/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/ia64/include/asm/esi.h | 30
-rw-r--r--  arch/ia64/include/asm/exception.h | 23
-rw-r--r--  arch/ia64/include/asm/export.h | 3
-rw-r--r--  arch/ia64/include/asm/extable.h | 12
-rw-r--r--  arch/ia64/include/asm/fb.h | 24
-rw-r--r--  arch/ia64/include/asm/fpswa.h | 74
-rw-r--r--  arch/ia64/include/asm/ftrace.h | 28
-rw-r--r--  arch/ia64/include/asm/futex.h | 109
-rw-r--r--  arch/ia64/include/asm/gcc_intrin.h | 13
-rw-r--r--  arch/ia64/include/asm/hardirq.h | 27
-rw-r--r--  arch/ia64/include/asm/hugetlb.h | 33
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 168
-rw-r--r--  arch/ia64/include/asm/idle.h | 8
-rw-r--r--  arch/ia64/include/asm/intrinsics.h | 13
-rw-r--r--  arch/ia64/include/asm/io.h | 287
-rw-r--r--  arch/ia64/include/asm/iommu.h | 22
-rw-r--r--  arch/ia64/include/asm/iommu_table.h | 7
-rw-r--r--  arch/ia64/include/asm/iosapic.h | 106
-rw-r--r--  arch/ia64/include/asm/irq.h | 35
-rw-r--r--  arch/ia64/include/asm/irq_regs.h | 1
-rw-r--r--  arch/ia64/include/asm/irq_remapping.h | 5
-rw-r--r--  arch/ia64/include/asm/irqflags.h | 95
-rw-r--r--  arch/ia64/include/asm/kdebug.h | 45
-rw-r--r--  arch/ia64/include/asm/kexec.h | 46
-rw-r--r--  arch/ia64/include/asm/kmap_types.h | 13
-rw-r--r--  arch/ia64/include/asm/kprobes.h | 118
-rw-r--r--  arch/ia64/include/asm/kregs.h | 166
-rw-r--r--  arch/ia64/include/asm/libata-portmap.h | 9
-rw-r--r--  arch/ia64/include/asm/linkage.h | 19
-rw-r--r--  arch/ia64/include/asm/local.h | 1
-rw-r--r--  arch/ia64/include/asm/mca.h | 188
-rw-r--r--  arch/ia64/include/asm/mca_asm.h | 245
-rw-r--r--  arch/ia64/include/asm/meminit.h | 74
-rw-r--r--  arch/ia64/include/asm/mman.h | 18
-rw-r--r--  arch/ia64/include/asm/mmiowb.h | 17
-rw-r--r--  arch/ia64/include/asm/mmu.h | 14
-rw-r--r--  arch/ia64/include/asm/mmu_context.h | 200
-rw-r--r--  arch/ia64/include/asm/mmzone.h | 35
-rw-r--r--  arch/ia64/include/asm/module.h | 35
-rw-r--r--  arch/ia64/include/asm/module.lds.h | 14
-rw-r--r--  arch/ia64/include/asm/msidef.h | 43
-rw-r--r--  arch/ia64/include/asm/native/inst.h | 119
-rw-r--r--  arch/ia64/include/asm/native/irq.h | 20
-rw-r--r--  arch/ia64/include/asm/native/patchlist.h | 24
-rw-r--r--  arch/ia64/include/asm/nodedata.h | 63
-rw-r--r--  arch/ia64/include/asm/numa.h | 83
-rw-r--r--  arch/ia64/include/asm/page.h | 235
-rw-r--r--  arch/ia64/include/asm/pal.h | 1826
-rw-r--r--  arch/ia64/include/asm/param.h | 18
-rw-r--r--  arch/ia64/include/asm/parport.h | 20
-rw-r--r--  arch/ia64/include/asm/patch.h | 28
-rw-r--r--  arch/ia64/include/asm/pci.h | 72
-rw-r--r--  arch/ia64/include/asm/percpu.h | 53
-rw-r--r--  arch/ia64/include/asm/perfmon.h | 111
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 65
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 566
-rw-r--r--  arch/ia64/include/asm/processor.h | 674
-rw-r--r--  arch/ia64/include/asm/ptrace.h | 145
-rw-r--r--  arch/ia64/include/asm/sal.h | 919
-rw-r--r--  arch/ia64/include/asm/sections.h | 51
-rw-r--r--  arch/ia64/include/asm/serial.h | 17
-rw-r--r--  arch/ia64/include/asm/shmparam.h | 13
-rw-r--r--  arch/ia64/include/asm/signal.h | 33
-rw-r--r--  arch/ia64/include/asm/smp.h | 103
-rw-r--r--  arch/ia64/include/asm/sn/intr.h | 15
-rw-r--r--  arch/ia64/include/asm/sn/sn_sal.h | 124
-rw-r--r--  arch/ia64/include/asm/sparsemem.h | 27
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 276
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 22
-rw-r--r--  arch/ia64/include/asm/string.h | 22
-rw-r--r--  arch/ia64/include/asm/switch_to.h | 71
-rw-r--r--  arch/ia64/include/asm/syscall.h | 78
-rw-r--r--  arch/ia64/include/asm/termios.h | 58
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 133
-rw-r--r--  arch/ia64/include/asm/timex.h | 47
-rw-r--r--  arch/ia64/include/asm/tlb.h | 50
-rw-r--r--  arch/ia64/include/asm/tlbflush.h | 128
-rw-r--r--  arch/ia64/include/asm/topology.h | 56
-rw-r--r--  arch/ia64/include/asm/types.h | 32
-rw-r--r--  arch/ia64/include/asm/uaccess.h | 293
-rw-r--r--  arch/ia64/include/asm/unaligned.h | 12
-rw-r--r--  arch/ia64/include/asm/uncached.h | 9
-rw-r--r--  arch/ia64/include/asm/unistd.h | 38
-rw-r--r--  arch/ia64/include/asm/unwind.h | 234
-rw-r--r--  arch/ia64/include/asm/user.h | 59
-rw-r--r--  arch/ia64/include/asm/ustack.h | 12
-rw-r--r--  arch/ia64/include/asm/uv/uv.h | 30
-rw-r--r--  arch/ia64/include/asm/uv/uv_hub.h | 315
-rw-r--r--  arch/ia64/include/asm/uv/uv_mmrs.h | 825
-rw-r--r--  arch/ia64/include/asm/vermagic.h | 15
-rw-r--r--  arch/ia64/include/asm/vga.h | 26
-rw-r--r--  arch/ia64/include/asm/vmalloc.h | 4
-rw-r--r--  arch/ia64/include/asm/xor.h | 23
-rw-r--r--  arch/ia64/include/asm/xtp.h | 46
-rw-r--r--  arch/ia64/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/ia64/include/uapi/asm/auxvec.h | 14
-rw-r--r--  arch/ia64/include/uapi/asm/bitsperlong.h | 9
-rw-r--r--  arch/ia64/include/uapi/asm/break.h | 23
-rw-r--r--  arch/ia64/include/uapi/asm/byteorder.h | 7
-rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h | 155
-rw-r--r--  arch/ia64/include/uapi/asm/fcntl.h | 15
-rw-r--r--  arch/ia64/include/uapi/asm/fpu.h | 67
-rw-r--r--  arch/ia64/include/uapi/asm/gcc_intrin.h | 619
-rw-r--r--  arch/ia64/include/uapi/asm/ia64regs.h | 101
-rw-r--r--  arch/ia64/include/uapi/asm/intel_intrin.h | 162
-rw-r--r--  arch/ia64/include/uapi/asm/intrinsics.h | 86
-rw-r--r--  arch/ia64/include/uapi/asm/mman.h | 17
-rw-r--r--  arch/ia64/include/uapi/asm/param.h | 30
-rw-r--r--  arch/ia64/include/uapi/asm/perfmon.h | 178
-rw-r--r--  arch/ia64/include/uapi/asm/perfmon_default_smpl.h | 84
-rw-r--r--  arch/ia64/include/uapi/asm/posix_types.h | 9
-rw-r--r--  arch/ia64/include/uapi/asm/ptrace.h | 248
-rw-r--r--  arch/ia64/include/uapi/asm/ptrace_offsets.h | 269
-rw-r--r--  arch/ia64/include/uapi/asm/resource.h | 8
-rw-r--r--  arch/ia64/include/uapi/asm/rse.h | 67
-rw-r--r--  arch/ia64/include/uapi/asm/setup.h | 25
-rw-r--r--  arch/ia64/include/uapi/asm/sigcontext.h | 71
-rw-r--r--  arch/ia64/include/uapi/asm/siginfo.h | 28
-rw-r--r--  arch/ia64/include/uapi/asm/signal.h | 122
-rw-r--r--  arch/ia64/include/uapi/asm/stat.h | 52
-rw-r--r--  arch/ia64/include/uapi/asm/statfs.h | 21
-rw-r--r--  arch/ia64/include/uapi/asm/swab.h | 35
-rw-r--r--  arch/ia64/include/uapi/asm/termbits.h | 209
-rw-r--r--  arch/ia64/include/uapi/asm/termios.h | 51
-rw-r--r--  arch/ia64/include/uapi/asm/types.h | 32
-rw-r--r--  arch/ia64/include/uapi/asm/ucontext.h | 13
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h | 22
-rw-r--r--  arch/ia64/include/uapi/asm/ustack.h | 13
157 files changed, 15396 insertions, 0 deletions
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
new file mode 100644
index 000000000..f994c1daf
--- /dev/null
+++ b/arch/ia64/include/asm/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += syscall_table.h
+generic-y += kvm_para.h
+generic-y += mcs_spinlock.h
+generic-y += vtime.h
diff --git a/arch/ia64/include/asm/acenv.h b/arch/ia64/include/asm/acenv.h
new file mode 100644
index 000000000..9d673cd4c
--- /dev/null
+++ b/arch/ia64/include/asm/acenv.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IA64 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Author: Lv Zheng <lv.zheng@intel.com>
+ */
+
+#ifndef _ASM_IA64_ACENV_H
+#define _ASM_IA64_ACENV_H
+
+#include <asm/intrinsics.h>
+
+#define COMPILER_DEPENDENT_INT64 long
+#define COMPILER_DEPENDENT_UINT64 unsigned long
+
+/* Asm macros */
+
+static inline int
+ia64_acpi_acquire_global_lock(unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+ val = ia64_cmpxchg4_acq(lock, new, old);
+ } while (unlikely (val != old));
+ return (new < 3) ? -1 : 0;
+}
+
+static inline int
+ia64_acpi_release_global_lock(unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = old & ~0x3;
+ val = ia64_cmpxchg4_acq(lock, new, old);
+ } while (unlikely (val != old));
+ return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
+
+#endif /* _ASM_IA64_ACENV_H */
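Note on the acquire/release helpers in acenv.h: the FACS global lock word keeps its state in the two low bits (bit 0 = pending, bit 1 = owned, per the ACPI spec), so the cmpxchg loop either takes ownership outright or merely records that a waiter is pending. The sketch below is a stand-alone, hypothetical trace of that arithmetic, not kernel code:

#include <stdio.h>

/* Hypothetical helper: replays the acquire formula used above,
 * new = ((old & ~0x3) + 2) + ((old >> 1) & 0x1),
 * where bit 1 (0x2) is "owned" and bit 0 (0x1) is "pending". */
static unsigned int acquire_next(unsigned int old)
{
	return ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
}

int main(void)
{
	unsigned int free_lock = 0x0;	/* not owned, not pending */
	unsigned int held_lock = 0x2;	/* already owned elsewhere */

	/* 0x0 -> 0x2: we become the owner; "new < 3" so the kernel
	 * helper returns -1, i.e. the lock was acquired. */
	printf("free: old=%#x new=%#x\n", free_lock, acquire_next(free_lock));

	/* 0x2 -> 0x3: still owned elsewhere; only the pending bit is
	 * set and the helper returns 0, i.e. we must wait. */
	printf("held: old=%#x new=%#x\n", held_lock, acquire_next(held_lock));
	return 0;
}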
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
new file mode 100644
index 000000000..eaa57583d
--- /dev/null
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * Vendor specific extensions to ACPI.
+ */
+
+#ifndef _ASM_IA64_ACPI_EXT_H
+#define _ASM_IA64_ACPI_EXT_H
+
+#include <linux/types.h>
+
+extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
+
+#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
new file mode 100644
index 000000000..87927eb82
--- /dev/null
+++ b/arch/ia64/include/asm/acpi.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
+ * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <acpi/pdc_intel.h>
+
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <asm/numa.h>
+
+
+extern int acpi_lapic;
+#define acpi_disabled 0 /* ACPI always enabled on IA64 */
+#define acpi_noirq 0 /* ACPI always enabled on IA64 */
+#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
+#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return !!acpi_lapic;
+}
+
+#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
+static inline void disable_acpi(void) { }
+
+int acpi_request_vector (u32 int_type);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
+
+static inline unsigned long acpi_get_wakeup_address(void)
+{
+ return 0;
+}
+
+/*
+ * Record the cpei override flag and current logical cpu. This is
+ * useful for CPU removal.
+ */
+extern unsigned int can_cpei_retarget(void);
+extern unsigned int is_cpu_cpei_target(unsigned int cpu);
+extern void set_cpei_target_cpu(unsigned int cpu);
+extern unsigned int get_cpei_target_cpu(void);
+extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256)
+#endif
+extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
+extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
+#endif
+
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+ for_each_cpu((cpu), &early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+ int low_cpu, high_cpu;
+ int cpu;
+ int next_nid = 0;
+
+ low_cpu = cpumask_weight(&early_cpu_possible_map);
+
+ high_cpu = max(low_cpu, min_cpus);
+ high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+ for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+ cpumask_set_cpu(cpu, &early_cpu_possible_map);
+ if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+ node_cpuid[cpu].nid = next_nid;
+ next_nid++;
+ if (next_nid >= num_online_nodes())
+ next_nid = 0;
+ }
+ }
+}
+
+extern void acpi_numa_fixup(void);
+
+#endif /* CONFIG_ACPI_NUMA */
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
new file mode 100644
index 000000000..0261507dc
--- /dev/null
+++ b/arch/ia64/include/asm/agp.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
+
+/*
+ * IA-64 specific AGP definitions.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+ * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+ * (unlike x86, where it gets mapped "write-coalescing").
+ */
+#define map_page_into_agp(page) do { } while (0)
+#define unmap_page_from_agp(page) do { } while (0)
+#define flush_agp_cache() mb()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_IA64_AGP_H */
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..a96689447
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <linux/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
new file mode 100644
index 000000000..52619c517
--- /dev/null
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#define ENTRY(name) \
+ .align 32; \
+ .proc name; \
+name:
+
+#define ENTRY_MIN_ALIGN(name) \
+ .align 16; \
+ .proc name; \
+name:
+
+#define GLOBAL_ENTRY(name) \
+ .global name; \
+ ENTRY(name)
+
+#define END(name) \
+ .endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP 0x8
+#define ASM_UNW_PRLG_PFS 0x4
+#define ASM_UNW_PRLG_PSP 0x2
+#define ASM_UNW_PRLG_PR 0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ *
+ * When adding any new .section/.previous entries here, make sure to
+ * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
+ * unpleasant things will happen.
+ */
+
+ .section "__ex_table", "a" // declare section & section attributes
+ .previous
+
+# define EX(y,x...) \
+ .xdata4 "__ex_table", 99f-., y-.; \
+ [99:] x
+# define EXCLR(y,x...) \
+ .xdata4 "__ex_table", 99f-., y-.+4; \
+ [99:] x
+
+/*
+ * Tag MCA recoverable instruction ranges.
+ */
+
+ .section "__mca_table", "a" // declare section & section attributes
+ .previous
+
+# define MCA_RECOVER_RANGE(y) \
+ .xdata4 "__mca_table", y-., 99f-.; \
+ [99:]
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address. We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+ .section ".data..patch.vtop", "a" // declare section & section attributes
+ .previous
+
+#define LOAD_PHYSICAL(pr, reg, obj) \
+[1:](pr)movl reg = obj; \
+ .xdata4 ".data..patch.vtop", 1b-.
+
+/*
+ * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+ .section ".data..patch.mckinley_e9", "a"
+ .previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN \
+ .xdata4 ".data..patch.mckinley_e9", 1f-.; \
+1:{ .mib; \
+ nop.m 0; \
+ mov r16=ar.pfs; \
+ br.call.sptk.many b7=2f;; \
+ }; \
+2:{ .mib; \
+ nop.m 0; \
+ mov ar.pfs=r16; \
+ br.ret.sptk.many b6;; \
+ }
+#else
+# define FSYS_RETURN br.ret.sptk.many b6
+#endif
+
+/*
+ * If physical stack register size is different from DEF_NUM_STACK_REG,
+ * dynamically patch the kernel for correct size.
+ */
+ .section ".data..patch.phys_stack_reg", "a"
+ .previous
+#define LOAD_PHYS_STACK_REG_SIZE(reg) \
+[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
+ .xdata4 ".data..patch.phys_stack_reg", 1b-.
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n) .align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data .serialize.data
+# define dv_serialize_instruction .serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
new file mode 100644
index 000000000..f267d9564
--- /dev/null
+++ b/arch/ia64/include/asm/atomic.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below! The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
+
+#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
+
+#define ATOMIC_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_##op (int i, atomic_t *v) \
+{ \
+ __s32 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+ return new; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_fetch_##op (int i, atomic_t *v) \
+{ \
+ __s32 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+ return old; \
+}
+
+#define ATOMIC_OPS(op, c_op) \
+ ATOMIC_OP(op, c_op) \
+ ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
+
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i) \
+ static const int __ia64_atomic_p = __builtin_constant_p(i) ? \
+ ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
+ (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
+ __ia64_atomic_p
+#else
+#define __ia64_atomic_const(i) 0
+#endif
+
+#define atomic_add_return(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic_sub_return(i,v) \
+({ \
+ int __ia64_asr_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define atomic_fetch_add(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
+ : ia64_atomic_fetch_add(__ia64_aar_i, v); \
+})
+
+#define atomic_fetch_sub(i,v) \
+({ \
+ int __ia64_asr_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
+ : ia64_atomic_fetch_sub(__ia64_asr_i, v); \
+})
+
+ATOMIC_FETCH_OP(and, &)
+ATOMIC_FETCH_OP(or, |)
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
+
+#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ s64 \
+ia64_atomic64_##op (s64 i, atomic64_t *v) \
+{ \
+ s64 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic64_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+ return new; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op) \
+static __inline__ s64 \
+ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
+{ \
+ s64 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic64_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+ return old; \
+}
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +)
+ATOMIC64_OPS(sub, -)
+
+#define atomic64_add_return(i,v) \
+({ \
+ s64 __ia64_aar_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+ s64 __ia64_asr_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
+#define atomic64_fetch_add(i,v) \
+({ \
+ s64 __ia64_aar_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
+ : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_fetch_sub(i,v) \
+({ \
+ s64 __ia64_asr_i = (i); \
+ __ia64_atomic_const(i) \
+ ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
+ : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
+})
+
+ATOMIC64_FETCH_OP(and, &)
+ATOMIC64_FETCH_OP(or, |)
+ATOMIC64_FETCH_OP(xor, ^)
+
+#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
+
+#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic64_cmpxchg(v, old, new) \
+ (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_add(i,v) (void)atomic_add_return((i), (v))
+#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
+
+#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
+
+#endif /* _ASM_IA64_ATOMIC_H */
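The __ia64_atomic_const() test in atomic.h exists because the ia64 fetchadd instruction only accepts the immediates +-1, +-4, +-8 and +-16; constant increments from that set can use the single-instruction ia64_fetch_and_add() path, and every other value falls back to the cmpxchg retry loop built by ATOMIC_OP(). A rough user-space sketch of the same dispatch idea, with C11 atomics standing in for the ia64 intrinsics (illustrative only, not the kernel macros):

#include <stdatomic.h>

static inline int my_atomic_add_return(int i, _Atomic int *v)
{
	/* Constants in {+-1, +-4, +-8, +-16} are the only immediates the
	 * ia64 fetchadd instruction takes, hence the list checked by
	 * __ia64_atomic_const() above. */
	if (__builtin_constant_p(i) &&
	    (i == 1 || i == 4 || i == 8 || i == 16 ||
	     i == -1 || i == -4 || i == -8 || i == -16))
		return atomic_fetch_add(v, i) + i;	/* "fetchadd" fast path */

	int old = atomic_load(v);
	while (!atomic_compare_exchange_weak(v, &old, old + i))
		;					/* cmpxchg retry loop */
	return old + i;
}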
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
new file mode 100644
index 000000000..751cdd353
--- /dev/null
+++ b/arch/ia64/include/asm/barrier.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Memory barrier definitions. This is based on information published
+ * in the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_BARRIER_H
+#define _ASM_IA64_BARRIER_H
+
+#include <linux/compiler.h>
+
+/*
+ * Macros to force memory ordering. In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ * wmb(): Guarantees that all preceding stores to memory-
+ * like regions are visible before any subsequent
+ * stores and that all following stores will be
+ * visible only after all previous stores.
+ * rmb(): Like wmb(), but for reads.
+ * mb(): wmb()/rmb() combo, i.e., all previous memory
+ * accesses are visible before all subsequent
+ * accesses and vice versa. This is also known as
+ * a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers. For that, mf.a needs to
+ * be used. However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb() ia64_mf()
+#define rmb() mb()
+#define wmb() mb()
+
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+
+# define __smp_mb() mb()
+
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
+ * need for asm trickery!
+ */
+
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+/*
+ * The group barrier in front of the rsm & ssm are necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+
+#include <asm-generic/barrier.h>
+
+#endif /* _ASM_IA64_BARRIER_H */
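The usual use of the acquire/release pair defined in barrier.h is publication: a writer fills a payload and then does a release store to a flag, while a reader does an acquire load of the flag before touching the payload; on ia64 these become st.rel and ld.acq, as the comment above notes. A minimal sketch using the generic kernel wrappers that expand to __smp_store_release()/__smp_load_acquire() (kernel context assumed, the variable names are made up):

/* Illustrative producer/consumer publication pattern. */
static int payload;
static int ready;

static void producer(void)
{
	payload = 42;			/* ordinary store */
	smp_store_release(&ready, 1);	/* st.rel: payload is visible
					 * before ready reads as 1 */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* ld.acq: later loads cannot be
					 * hoisted above this load */
		return payload;		/* guaranteed to observe 42 */
	return -1;
}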
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
new file mode 100644
index 000000000..2f24ee645
--- /dev/null
+++ b/arch/ia64/include/asm/bitops.h
@@ -0,0 +1,456 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
+ * O(1) scheduler patch
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are driver (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered. It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Similarly to clear_bit_unlock, the implementation uses a store
+ * with release semantics. See also arch_spin_unlock().
+ */
+static __inline__ void
+__clear_bit_unlock(int nr, void *addr)
+{
+ __u32 * const m = (__u32 *) addr + (nr >> 5);
+ __u32 const new = *m & ~(1 << (nr & 31));
+
+ ia64_st4_rel_nta(m, new);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__clear_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to toggle
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to toggle
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+ __u32 *p = (__u32 *) addr + (nr >> 5);
+ __u32 m = 1 << (nr & 31);
+ int oldbitset = (*p & m) != 0;
+
+ *p |= m;
+ return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & ~mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+ __u32 *p = (__u32 *) addr + (nr >> 5);
+ __u32 m = 1 << (nr & 31);
+ int oldbitset = (*p & m) != 0;
+
+ *p &= ~m;
+ return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+ __u32 old, bit = (1 << (nr & 31));
+ __u32 *m = (__u32 *) addr + (nr >> 5);
+
+ old = *m;
+ *m = old ^ bit;
+ return (old & bit) != 0;
+}
+
+static __inline__ int
+test_bit (int nr, const volatile void *addr)
+{
+ return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/**
+ * ffz - find the first zero bit in a long word
+ * @x: The long word to find the bit in
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.
+ * Undefined if no zero exists, so code should check against ~0UL first...
+ */
+static inline unsigned long
+ffz (unsigned long x)
+{
+ unsigned long result;
+
+ result = ia64_popcnt(x & (~x - 1));
+ return result;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @x: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long
+__ffs (unsigned long x)
+{
+ unsigned long result;
+
+ result = ia64_popcnt((x-1) & ~x);
+ return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Return bit number of last (most-significant) bit set. Undefined
+ * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+ long double d = x;
+ long exp;
+
+ exp = ia64_getf_exp(d);
+ return exp - 0xffff;
+}
+
+/*
+ * Find the last (most significant) bit set. Returns 0 for x==0 and
+ * bits are numbered from 1..32 (e.g., fls(9) == 4).
+ */
+static inline int fls(unsigned int t)
+{
+ unsigned long x = t & 0xffffffffu;
+
+ if (!x)
+ return 0;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ return ia64_popcnt(x);
+}
+
+/*
+ * Find the last (most significant) bit set. Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ return ia64_popcnt(x) - 1;
+}
+
+#include <asm-generic/bitops/fls64.h>
+
+#include <asm-generic/bitops/builtin-ffs.h>
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+static __inline__ unsigned long __arch_hweight64(unsigned long x)
+{
+ unsigned long result;
+ result = ia64_popcnt(x);
+ return result;
+}
+
+#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
+#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
+#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
+
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
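The popcnt-based ffz() and __ffs() in bitops.h rest on two identities: (x - 1) & ~x keeps exactly the bits below the lowest set bit of x, and x & (~x - 1) keeps exactly the bits below the lowest clear bit, so counting the surviving bits gives the bit index. A small stand-alone check, with __builtin_popcountl standing in for ia64_popcnt (illustrative only):

#include <assert.h>

static unsigned long my_ffs(unsigned long x)	/* caller ensures x != 0 */
{
	return __builtin_popcountl((x - 1) & ~x);
}

static unsigned long my_ffz(unsigned long x)	/* caller ensures x != ~0UL */
{
	return __builtin_popcountl(x & (~x - 1));
}

int main(void)
{
	assert(my_ffs(0x28) == 3);	/* 0b101000: lowest set bit is bit 3 */
	assert(my_ffz(0x0b) == 2);	/* 0b001011: lowest clear bit is bit 2 */
	return 0;
}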
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
new file mode 100644
index 000000000..66b37a532
--- /dev/null
+++ b/arch/ia64/include/asm/bug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_BUG_H
+#define _ASM_IA64_BUG_H
+
+#ifdef CONFIG_BUG
+#define ia64_abort() __builtin_trap()
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ barrier_before_unreachable(); \
+ ia64_abort(); \
+} while (0)
+
+/* should this BUG be made generic? */
+#define HAVE_ARCH_BUG
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
new file mode 100644
index 000000000..2f1c70647
--- /dev/null
+++ b/arch/ia64/include/asm/cache.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* Bytes per L1 (data) cache line. */
+#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#ifdef CONFIG_SMP
+# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+# define SMP_CACHE_BYTES L1_CACHE_BYTES
+#else
+ /*
+ * The "aligned" directive can only _increase_ alignment, so this is
+ * safe and provides an easy way to avoid wasting space on a
+ * uni-processor:
+ */
+# define SMP_CACHE_SHIFT 3
+# define SMP_CACHE_BYTES (1 << 3)
+#endif
+
+#define __read_mostly __section(".data..read_mostly")
+
+#endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
new file mode 100644
index 000000000..708c0fa5d
--- /dev/null
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CACHEFLUSH_H
+#define _ASM_IA64_CACHEFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/page-flags.h>
+#include <linux/bitops.h>
+
+#include <asm/page.h>
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+#define flush_dcache_page(page) \
+do { \
+ clear_bit(PG_arch_1, &(page)->flags); \
+} while (0)
+
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+extern void clflush_cache_range(void *addr, int size);
+
+#define flush_icache_user_page(vma, page, user_addr, len) \
+do { \
+ unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
+ flush_icache_range(_addr, _addr + (len)); \
+} while (0)
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
new file mode 100644
index 000000000..f3026213a
--- /dev/null
+++ b/arch/ia64/include/asm/checksum.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
+extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+#define _HAVE_ARCH_IPV6_CSUM 1
+struct in6_addr;
+extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum);
+
+#endif /* _ASM_IA64_CHECKSUM_H */
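csum_fold() above reduces the 32-bit one's-complement accumulator to 16 bits by adding the high and low halves twice (the second add absorbs any carry out of the first) and complementing the result. A quick numeric trace of that arithmetic as a hypothetical stand-alone helper:

#include <stdio.h>
#include <stdint.h>

/* Same folding arithmetic as csum_fold() above, on plain integers. */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* 0x0001ffff -> 0x00010000 */
	sum = (sum & 0xffff) + (sum >> 16);	/* 0x00010000 -> 0x00000001 */
	return (uint16_t)~sum;			/* -> 0xfffe                */
}

int main(void)
{
	printf("%#06x\n", fold(0x0001ffffu));	/* prints 0xfffe */
	return 0;
}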
diff --git a/arch/ia64/include/asm/clocksource.h b/arch/ia64/include/asm/clocksource.h
new file mode 100644
index 000000000..71a517751
--- /dev/null
+++ b/arch/ia64/include/asm/clocksource.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* IA64-specific clocksource additions */
+
+#ifndef _ASM_IA64_CLOCKSOURCE_H
+#define _ASM_IA64_CLOCKSOURCE_H
+
+struct arch_clocksource_data {
+ void *fsys_mmio; /* used by fsyscall asm code */
+};
+
+#endif /* _ASM_IA64_CLOCKSOURCE_H */
diff --git a/arch/ia64/include/asm/cpu.h b/arch/ia64/include/asm/cpu.h
new file mode 100644
index 000000000..db125df9e
--- /dev/null
+++ b/arch/ia64/include/asm/cpu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CPU_H_
+#define _ASM_IA64_CPU_H_
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/percpu.h>
+
+struct ia64_cpu {
+ struct cpu cpu;
+};
+
+DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
+
+DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
+extern void arch_unregister_cpu(int);
+#endif
+
+#endif /* _ASM_IA64_CPU_H_ */
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
new file mode 100644
index 000000000..7f28c3564
--- /dev/null
+++ b/arch/ia64/include/asm/cputime.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
+#ifndef __IA64_CPUTIME_H
+#define __IA64_CPUTIME_H
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+#endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/current.h b/arch/ia64/include/asm/current.h
new file mode 100644
index 000000000..86fbcc88d
--- /dev/null
+++ b/arch/ia64/include/asm/current.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Modified 1998-2000
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
+
+#endif /* _ASM_IA64_CURRENT_H */
diff --git a/arch/ia64/include/asm/cyclone.h b/arch/ia64/include/asm/cyclone.h
new file mode 100644
index 000000000..a48139364
--- /dev/null
+++ b/arch/ia64/include/asm/cyclone.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_IA64_CYCLONE_H
+#define ASM_IA64_CYCLONE_H
+
+#ifdef CONFIG_IA64_CYCLONE
+extern int use_cyclone;
+extern void __init cyclone_setup(void);
+#else /* CONFIG_IA64_CYCLONE */
+#define use_cyclone 0
+static inline void cyclone_setup(void)
+{
+ printk(KERN_ERR "Cyclone Counter: System not configured"
+ " w/ CONFIG_IA64_CYCLONE.\n");
+}
+#endif /* CONFIG_IA64_CYCLONE */
+#endif /* !ASM_IA64_CYCLONE_H */
diff --git a/arch/ia64/include/asm/delay.h b/arch/ia64/include/asm/delay.h
new file mode 100644
index 000000000..0227ac586
--- /dev/null
+++ b/arch/ia64/include/asm/delay.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+static __inline__ void
+ia64_set_itm (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_ITM, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itm (void)
+{
+ unsigned long result;
+
+ result = ia64_getreg(_IA64_REG_CR_ITM);
+ ia64_srlz_d();
+ return result;
+}
+
+static __inline__ void
+ia64_set_itv (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_ITV, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itv (void)
+{
+ return ia64_getreg(_IA64_REG_CR_ITV);
+}
+
+static __inline__ void
+ia64_set_itc (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_AR_ITC, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itc (void)
+{
+ unsigned long result;
+
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
+#ifdef CONFIG_ITANIUM
+ while (unlikely((__s32) result == -1)) {
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
+ }
+#endif
+ return result;
+}
+
+extern void ia64_delay_loop (unsigned long loops);
+
+static __inline__ void
+__delay (unsigned long loops)
+{
+ if (unlikely(loops < 1))
+ return;
+
+ ia64_delay_loop (loops - 1);
+}
+
+extern void udelay (unsigned long usecs);
+
+#endif /* _ASM_IA64_DELAY_H */
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
new file mode 100644
index 000000000..918b198cd
--- /dev/null
+++ b/arch/ia64/include/asm/device.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arch specific extensions to struct device
+ */
+#ifndef _ASM_IA64_DEVICE_H
+#define _ASM_IA64_DEVICE_H
+
+struct dev_archdata {
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/div64.h b/arch/ia64/include/asm/div64.h
new file mode 100644
index 000000000..6cd978cef
--- /dev/null
+++ b/arch/ia64/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
new file mode 100644
index 000000000..a5d9d788e
--- /dev/null
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+extern const struct dma_map_ops *dma_ops;
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return dma_ops;
+}
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
new file mode 100644
index 000000000..59625e9c1
--- /dev/null
+++ b/arch/ia64/include/asm/dma.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/io.h> /* need byte IO */
+
+extern unsigned long MAX_DMA_ADDRESS;
+
+extern int isa_dma_bridge_buggy;
+
+#define free_dma(x)
+
+#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
new file mode 100644
index 000000000..ecd9e0a0f
--- /dev/null
+++ b/arch/ia64/include/asm/dmi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <linux/slab.h>
+#include <asm/io.h>
+
+/* Use normal IO mappings for DMI */
+#define dmi_early_remap ioremap
+#define dmi_early_unmap(x, l) iounmap(x)
+#define dmi_remap ioremap
+#define dmi_unmap iounmap
+#define dmi_alloc(l) kzalloc(l, GFP_ATOMIC)
+
+#endif
diff --git a/arch/ia64/include/asm/early_ioremap.h b/arch/ia64/include/asm/early_ioremap.h
new file mode 100644
index 000000000..934191b1e
--- /dev/null
+++ b/arch/ia64/include/asm/early_ioremap.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_EARLY_IOREMAP_H
+#define _ASM_IA64_EARLY_IOREMAP_H
+
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size)
+
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+#define early_memunmap(addr, size) early_iounmap(addr, size)
+
+#endif
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
new file mode 100644
index 000000000..6629301a2
--- /dev/null
+++ b/arch/ia64/include/asm/elf.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_ELF_H
+#define _ASM_IA64_ELF_H
+
+/*
+ * ELF-specific definitions.
+ *
+ * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/auxvec.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_IA_64
+
+#define CORE_DUMP_USE_REGSET
+
+/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
+ interpreted as follows by Linux: */
+#define EF_IA_64_LINUX_EXECUTABLE_STACK 0x1 /* is stack (& heap) executable by default? */
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader. We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
+#define PT_IA_64_UNWIND 0x70000001
+
+/* IA-64 relocations: */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */
+#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+
+/* IA-64 specific section flags: */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+
+/*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+ * used to map the register backing store. I don't see any better
+ * place to do this, but we should discuss this with Linus once we can
+ * talk to him...
+ */
+extern void ia64_init_addr_space (void);
+#define ELF_PLAT_INIT(_r, load_addr) ia64_init_addr_space()
+
+/* ELF register definitions. This is needed for core dump support. */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ * r0-r31
+ * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+ * predicate registers (p0-p63)
+ * b0-b7
+ * ip cfm psr
+ * ar.rsc ar.bsp ar.bspstore ar.rnat
+ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+ */
+#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
+#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET 0
+#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia64_fpreg elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+
+
+struct pt_regs; /* forward declaration... */
+extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
+#define ELF_CORE_COPY_REGS(_dest,_regs) ia64_elf_core_copy_regs(_regs, _dest);
+
+/* This macro yields a bitmask that programs can use to figure out
+ what instruction set this CPU supports. */
+#define ELF_HWCAP 0
+
+/* This macro yields a string that ld.so will use to load
+ implementation specific libraries for optimization. Not terribly
+ relevant until we have real hardware to play with... */
+#define ELF_PLATFORM NULL
+
+#define elf_read_implies_exec(ex, executable_stack) \
+ ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+
+struct task_struct;
+
+#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+ extern char __kernel_syscall_via_epc[]; \
+ NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
+} while (0)
+
+/*
+ * format for entries in the Global Offset Table
+ */
+struct got_entry {
+ uint64_t val;
+};
+
+/*
+ * Layout of the Function Descriptor
+ */
+struct fdesc {
+ uint64_t ip;
+ uint64_t gp;
+};
+
+#endif /* _ASM_IA64_ELF_H */
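The struct fdesc defined above is the ia64 function-descriptor layout: an entry address (ip) followed by the gp value the function expects, the same two-word pair that later headers in this series (e.g. ftrace's MCOUNT_ADDR) pick apart from a function pointer. A minimal free-standing C sketch of reading the entry point out of such a descriptor (illustrative only; fdesc_sketch and fdesc_entry_ip are made-up names, not kernel symbols):

    #include <stdint.h>

    /* Same two-word layout as struct fdesc above: entry IP, then gp. */
    struct fdesc_sketch {
            uint64_t ip;    /* address of the function's first bundle */
            uint64_t gp;    /* global pointer the function expects */
    };

    /* On ia64 a C function pointer really names such a descriptor, so
     * the code entry point is recovered by reading the first word: */
    static inline uint64_t fdesc_entry_ip(const void *funcptr)
    {
            const struct fdesc_sketch *fd = funcptr;
            return fd->ip;
    }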
diff --git a/arch/ia64/include/asm/emergency-restart.h b/arch/ia64/include/asm/emergency-restart.h
new file mode 100644
index 000000000..108d8c48e
--- /dev/null
+++ b/arch/ia64/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/ia64/include/asm/esi.h b/arch/ia64/include/asm/esi.h
new file mode 100644
index 000000000..56d1310af
--- /dev/null
+++ b/arch/ia64/include/asm/esi.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ESI service calls.
+ *
+ * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ */
+#ifndef esi_h
+#define esi_h
+
+#include <linux/efi.h>
+
+#define ESI_QUERY 0x00000001
+#define ESI_OPEN_HANDLE 0x02000000
+#define ESI_CLOSE_HANDLE 0x02000001
+
+enum esi_proc_type {
+ ESI_PROC_SERIALIZED, /* calls need to be serialized */
+ ESI_PROC_MP_SAFE, /* MP-safe, but not reentrant */
+ ESI_PROC_REENTRANT /* MP-safe and reentrant */
+};
+
+extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
+extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
+ enum esi_proc_type,
+ u64, u64, u64, u64, u64, u64, u64, u64);
+extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
+ u64, u64, u64, u64, u64, u64);
+
+#endif /* esi_h */
diff --git a/arch/ia64/include/asm/exception.h b/arch/ia64/include/asm/exception.h
new file mode 100644
index 000000000..1d5df8116
--- /dev/null
+++ b/arch/ia64/include/asm/exception.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_EXCEPTION_H
+#define __ASM_EXCEPTION_H
+
+struct pt_regs;
+struct exception_table_entry;
+
+extern void ia64_handle_exception(struct pt_regs *regs,
+ const struct exception_table_entry *e);
+
+#define ia64_done_with_exception(regs) \
+({ \
+ int __ex_ret = 0; \
+ const struct exception_table_entry *e; \
+ e = search_exception_tables((regs)->cr_iip + ia64_psr(regs)->ri); \
+ if (e) { \
+ ia64_handle_exception(regs, e); \
+ __ex_ret = 1; \
+ } \
+ __ex_ret; \
+})
+
+#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/ia64/include/asm/export.h b/arch/ia64/include/asm/export.h
new file mode 100644
index 000000000..ad18c6583
--- /dev/null
+++ b/arch/ia64/include/asm/export.h
@@ -0,0 +1,3 @@
+/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
+#define KSYM_FUNC(name) @fptr(name)
+#include <asm-generic/export.h>
diff --git a/arch/ia64/include/asm/extable.h b/arch/ia64/include/asm/extable.h
new file mode 100644
index 000000000..83eac6aa0
--- /dev/null
+++ b/arch/ia64/include/asm/extable.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_EXTABLE_H
+#define _ASM_IA64_EXTABLE_H
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+ int insn; /* location-relative address of insn this fixup is for */
+ int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+#endif
diff --git a/arch/ia64/include/asm/fb.h b/arch/ia64/include/asm/fb.h
new file mode 100644
index 000000000..5f95782bf
--- /dev/null
+++ b/arch/ia64/include/asm/fb.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <linux/efi.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/ia64/include/asm/fpswa.h b/arch/ia64/include/asm/fpswa.h
new file mode 100644
index 000000000..2a0c23728
--- /dev/null
+++ b/arch/ia64/include/asm/fpswa.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_FPSWA_H
+#define _ASM_IA64_FPSWA_H
+
+/*
+ * Floating-point Software Assist
+ *
+ * Copyright (C) 1999 Intel Corporation.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
+ */
+
+typedef struct {
+ /* 4 * 128 bits */
+ unsigned long fp_lp[4*2];
+} fp_state_low_preserved_t;
+
+typedef struct {
+ /* 10 * 128 bits */
+ unsigned long fp_lv[10 * 2];
+} fp_state_low_volatile_t;
+
+typedef struct {
+ /* 16 * 128 bits */
+ unsigned long fp_hp[16 * 2];
+} fp_state_high_preserved_t;
+
+typedef struct {
+ /* 96 * 128 bits */
+ unsigned long fp_hv[96 * 2];
+} fp_state_high_volatile_t;
+
+/**
+ * floating point state to be passed to the FP emulation library by
+ * the trap/fault handler
+ */
+typedef struct {
+ unsigned long bitmask_low64;
+ unsigned long bitmask_high64;
+ fp_state_low_preserved_t *fp_state_low_preserved;
+ fp_state_low_volatile_t *fp_state_low_volatile;
+ fp_state_high_preserved_t *fp_state_high_preserved;
+ fp_state_high_volatile_t *fp_state_high_volatile;
+} fp_state_t;
+
+typedef struct {
+ unsigned long status;
+ unsigned long err0;
+ unsigned long err1;
+ unsigned long err2;
+} fpswa_ret_t;
+
+/**
+ * function header for the Floating Point software assist
+ * library. This function is invoked by the Floating point software
+ * assist trap/fault handler.
+ */
+typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
+ unsigned long *fsr, unsigned long *isr, unsigned long *preds,
+ unsigned long *ifs, fp_state_t *fp_state);
+
+/**
+ * This is the FPSWA library interface as defined by EFI. We need to pass a
+ * pointer to the interface itself on a call to the assist library
+ */
+typedef struct {
+ unsigned int revision;
+ unsigned int reserved;
+ efi_fpswa_t fpswa;
+} fpswa_interface_t;
+
+extern fpswa_interface_t *fpswa_interface;
+
+#endif /* _ASM_IA64_FPSWA_H */
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
new file mode 100644
index 000000000..a07a8e575
--- /dev/null
+++ b/arch/ia64/include/asm/ftrace.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_FTRACE_H
+#define _ASM_IA64_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+#define MCOUNT_INSN_SIZE 32 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
+#define mcount _mcount
+
+/* On IA64, MCOUNT_ADDR is set at link time, so it's not a constant at compile time */
+#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
+#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /* second bundle, insn 2 */
+ return addr - 0x12;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* _ASM_IA64_FTRACE_H */
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
new file mode 100644
index 000000000..1db26b432
--- /dev/null
+++ b/arch/ia64/include/asm/futex.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+do { \
+ register unsigned long r8 __asm ("r8") = 0; \
+ __asm__ __volatile__( \
+ " mf;; \n" \
+ "[1:] " insn ";; \n" \
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n" \
+ "[2:]" \
+ : "+r" (r8), "=r" (oldval) \
+ : "r" (uaddr), "r" (oparg) \
+ : "memory"); \
+ ret = r8; \
+} while (0)
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+do { \
+ register unsigned long r8 __asm ("r8") = 0; \
+ int val, newval; \
+ do { \
+ __asm__ __volatile__( \
+ " mf;; \n" \
+ "[1:] ld4 %3=[%4];; \n" \
+ " mov %2=%3 \n" \
+ insn ";; \n" \
+ " mov ar.ccv=%2;; \n" \
+ "[2:] cmpxchg4.acq %1=[%4],%3,ar.ccv;; \n" \
+ " .xdata4 \"__ex_table\", 1b-., 3f-.\n" \
+ " .xdata4 \"__ex_table\", 2b-., 3f-.\n" \
+ "[3:]" \
+ : "+r" (r8), "=r" (val), "=&r" (oldval), \
+ "=&r" (newval) \
+ : "r" (uaddr), "r" (oparg) \
+ : "memory"); \
+ if (unlikely (r8)) \
+ break; \
+ } while (unlikely (val != oldval)); \
+ ret = r8; \
+} while (0)
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ {
+ register unsigned long r8 __asm ("r8") = 0;
+ unsigned long prev;
+ __asm__ __volatile__(
+ " mf;; \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+ : "+r" (r8), "=&r" (prev)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+ *uval = prev;
+ return r8;
+ }
+}
+
+#endif /* _ASM_FUTEX_H */
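__futex_atomic_op2 above is a classic load / compute / cmpxchg retry loop, with the .xdata4 exception-table entries covering faults on the user address. A rough portable-C11 equivalent of the same retry pattern (a sketch only, not a kernel function; futex_op_add_sketch is a made-up name and it omits the fault handling the inline asm provides):

    #include <stdatomic.h>
    #include <stdint.h>

    static int futex_op_add_sketch(_Atomic uint32_t *uaddr, uint32_t oparg,
                                   uint32_t *oldval)
    {
            uint32_t old = atomic_load(uaddr);

            /* Retry until no other CPU changed the word between the load
             * and the compare-and-swap, mirroring the cmpxchg4.acq loop;
             * on failure 'old' is refreshed with the current value. */
            while (!atomic_compare_exchange_weak(uaddr, &old, old + oparg))
                    ;

            *oldval = old;
            return 0;
    }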
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
new file mode 100644
index 000000000..83f230b23
--- /dev/null
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+
+#include <uapi/asm/gcc_intrin.h>
+
+register unsigned long ia64_r13 asm ("r13") __used;
+#endif /* _ASM_IA64_GCC_INTRIN_H */
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
new file mode 100644
index 000000000..ccde7c2ba
--- /dev/null
+++ b/arch/ia64/include/asm/hardirq.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_HARDIRQ_H
+#define _ASM_IA64_HARDIRQ_H
+
+/*
+ * Modified 1998-2002, 2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
+ */
+
+#define __ARCH_IRQ_STAT 1
+
+#define local_softirq_pending_ref ia64_cpu_info.softirq_pending
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
+extern void __iomem *ipi_base_addr;
+
+void ack_bad_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
new file mode 100644
index 000000000..7e46ebde8
--- /dev/null
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+
+#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ return (REGION_NUMBER(addr) == RGN_HPAGE ||
+ REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+#define is_hugepage_only_range is_hugepage_only_range
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* _ASM_IA64_HUGETLB_H */
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
new file mode 100644
index 000000000..f6ff95b4e
--- /dev/null
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_HW_IRQ_H
+#define _ASM_IA64_HW_IRQ_H
+
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+typedef u8 ia64_vector;
+
+/*
+ * 0 special
+ *
+ * 1,3-14 are reserved by firmware
+ *
+ * 15 spurious interrupt (see IVR)
+ *
+ * 16-255 (vectored external interrupts) are available
+ *
+ * 16 is lowest priority, 255 is highest priority
+ *
+ * 15 classes of 16 interrupts each.
+ */
+#define IA64_MIN_VECTORED_IRQ 16
+#define IA64_MAX_VECTORED_IRQ 255
+#define IA64_NUM_VECTORS 256
+
+#define AUTO_ASSIGN -1
+
+#define IA64_SPURIOUS_INT_VECTOR 0x0f
+
+/*
+ * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
+ */
+#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */
+#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */
+#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */
+#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
+/*
+ * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ * Use vectors 0x30-0xe7 as the default device vector range for ia64.
+ * Platforms may choose to reduce this range in platform_irq_setup, but the
+ * platform range must fall within
+ * [IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
+ */
+extern int ia64_first_device_vector;
+extern int ia64_last_device_vector;
+
+#ifdef CONFIG_SMP
+/* Reserve a vector with lower priority than the device vectors for the "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
+#else
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#endif
+#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
+#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
+#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
+#define IA64_MAX_DEVICE_VECTORS (IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
+#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
+
+#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
+#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
+#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
+#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */
+#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
+#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
+
+/* Used for encoding redirected irqs */
+
+#define IA64_IRQ_REDIRECTED (1 << 31)
+
+/* IA64 inter-cpu interrupt related definitions */
+
+#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000
+
+/* Delivery modes for inter-cpu interrupts */
+enum {
+ IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
+ IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
+ IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
+ IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
+ IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
+};
+
+extern __u8 isa_irq_to_vector_map[16];
+#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
+
+struct irq_cfg {
+ ia64_vector vector;
+ cpumask_t domain;
+ cpumask_t old_domain;
+ unsigned move_cleanup_count;
+ u8 move_in_progress : 1;
+};
+extern spinlock_t vector_lock;
+extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x) irq_cfg[(x)].domain
+DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
+
+extern struct irq_chip irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
+
+#define ia64_register_ipi ia64_native_register_ipi
+#define assign_irq_vector ia64_native_assign_irq_vector
+#define free_irq_vector ia64_native_free_irq_vector
+#define ia64_resend_irq ia64_native_resend_irq
+
+extern void ia64_native_register_ipi(void);
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
+extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */
+extern void ia64_native_free_irq_vector (int vector);
+extern int reserve_irq_vector (int vector);
+extern void __setup_vector_irq(int cpu);
+extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
+extern void destroy_and_reserve_irq (unsigned int irq);
+
+#ifdef CONFIG_SMP
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
+static inline void ia64_native_resend_irq(unsigned int vector)
+{
+ ia64_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
+ * vectors. On smaller systems, there is a one-to-one correspondence between interrupt
+ * vectors and the Linux irq numbers. However, larger systems may have multiple interrupt
+ * domains meaning that the translation from vector number to irq number depends on the
+ * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
+ * differences and provides a uniform means to translate between vector and irq numbers
+ * and to obtain the irq descriptor for a given irq number.
+ */
+
+/* Extract the IA-64 vector that corresponds to IRQ. */
+static inline ia64_vector
+irq_to_vector (int irq)
+{
+ return irq_cfg[irq].vector;
+}
+
+/*
+ * Convert the local IA-64 vector to the corresponding irq number. This translation is
+ * done in the context of the interrupt domain that the currently executing CPU belongs
+ * to.
+ */
+static inline unsigned int
+local_vector_to_irq (ia64_vector vec)
+{
+ return __this_cpu_read(vector_irq[vec]);
+}
+
+#endif /* _ASM_IA64_HW_IRQ_H */
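The comment above local_vector_to_irq() is the key point of this header: with multiple interrupt domains, the same hardware vector can map to different Linux irq numbers depending on which CPU takes it, which is why vector_irq[] is per-CPU. A toy user-space model of that translation (illustrative only; the arrays and irq numbers are made up):

    #include <stdio.h>

    #define NUM_VECTORS 256

    /* One translation table per CPU, as DECLARE_PER_CPU(vector_irq) models. */
    static int vector_irq_cpu0[NUM_VECTORS];
    static int vector_irq_cpu1[NUM_VECTORS];

    int main(void)
    {
            /* Hypothetical assignments in two different interrupt domains. */
            vector_irq_cpu0[0x31] = 20;
            vector_irq_cpu1[0x31] = 52;

            printf("vector 0x31 -> irq %d on cpu0, irq %d on cpu1\n",
                   vector_irq_cpu0[0x31], vector_irq_cpu1[0x31]);
            return 0;
    }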
diff --git a/arch/ia64/include/asm/idle.h b/arch/ia64/include/asm/idle.h
new file mode 100644
index 000000000..97c55b97e
--- /dev/null
+++ b/arch/ia64/include/asm/idle.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_IDLE_H
+#define _ASM_IA64_IDLE_H
+
+static inline void enter_idle(void) { }
+static inline void exit_idle(void) { }
+
+#endif /* _ASM_IA64_IDLE_H */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
new file mode 100644
index 000000000..035b17fe1
--- /dev/null
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_INTRINSICS_H
+#define _ASM_IA64_INTRINSICS_H
+
+#include <uapi/asm/intrinsics.h>
+
+#endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
new file mode 100644
index 000000000..3d666a11a
--- /dev/null
+++ b/arch/ia64/include/asm/io.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that makes gcc able to optimize it as
+ * well as possible and (b) avoid writing the same thing over and
+ * over again with slight variations and possibly making a mistake
+ * somewhere.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <asm/unaligned.h>
+#include <asm/early_ioremap.h>
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO do { } while (0)
+#define SLOW_DOWN_IO do { } while (0)
+
+#define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED)
+
+/*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+ * large machines may have multiple other I/O spaces so we can't place any a priori limit
+ * on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
+ */
+#define IO_SPACE_LIMIT 0xffffffffffffffffUL
+
+#define MAX_IO_SPACES_BITS 8
+#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
+#define IO_SPACE_BITS 24
+#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
+
+struct io_space {
+ unsigned long mmio_base; /* base in MMIO space */
+ int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
+# ifdef __KERNEL__
+
+/*
+ * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
+ * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
+ * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
+ *
+ * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
+ * code that uses bare port numbers without the prerequisite pci_iomap().
+ */
+#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
+#define PIO_MASK (PIO_OFFSET - 1)
+#define PIO_RESERVED __IA64_UNCACHED_OFFSET
+#define HAVE_ARCH_PIO_SIZE
+
+#include <asm/intrinsics.h>
+#include <asm/page.h>
+#include <asm-generic/iomap.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+ return (unsigned long) address - PAGE_OFFSET;
+}
+#define virt_to_phys virt_to_phys
+
+static inline void*
+phys_to_virt (unsigned long address)
+{
+ return (void *) (address + PAGE_OFFSET);
+}
+#define phys_to_virt phys_to_virt
+
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
+extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
+
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
+#define bus_to_virt phys_to_virt
+#define virt_to_bus virt_to_phys
+#define page_to_bus page_to_phys
+
+# endif /* __KERNEL__ */
+
+/*
+ * Memory fence w/accept. This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a() ia64_mfa()
+
+static inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+ struct io_space *space;
+ unsigned long offset;
+
+ space = &io_space[IO_SPACE_NR(port)];
+ port = IO_SPACE_PORT(port);
+ if (space->sparse)
+ offset = IO_SPACE_SPARSE_ENCODING(port);
+ else
+ offset = port;
+
+ return (void *) (space->mmio_base | offset);
+}
+
+/*
+ * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
+ * that the access has completed before executing other I/O accesses. Since we're doing
+ * the accesses through an uncachable (UC) translation, the CPU will execute them in
+ * program order. However, we still need to tell the compiler not to shuffle them around
+ * during optimization, which is why we use "volatile" pointers.
+ */
+
+#define inb inb
+static inline unsigned int inb(unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+ unsigned char ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+#define inw inw
+static inline unsigned int inw(unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+ unsigned short ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+#define inl inl
+static inline unsigned int inl(unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+ unsigned int ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+#define outb outb
+static inline void outb(unsigned char val, unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+#define outw outw
+static inline void outw(unsigned short val, unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+#define outl outl
+static inline void outl(unsigned int val, unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+#define insb insb
+static inline void insb(unsigned long port, void *dst, unsigned long count)
+{
+ unsigned char *dp = dst;
+
+ while (count--)
+ *dp++ = inb(port);
+}
+
+#define insw insw
+static inline void insw(unsigned long port, void *dst, unsigned long count)
+{
+ unsigned short *dp = dst;
+
+ while (count--)
+ put_unaligned(inw(port), dp++);
+}
+
+#define insl insl
+static inline void insl(unsigned long port, void *dst, unsigned long count)
+{
+ unsigned int *dp = dst;
+
+ while (count--)
+ put_unaligned(inl(port), dp++);
+}
+
+#define outsb outsb
+static inline void outsb(unsigned long port, const void *src,
+ unsigned long count)
+{
+ const unsigned char *sp = src;
+
+ while (count--)
+ outb(*sp++, port);
+}
+
+#define outsw outsw
+static inline void outsw(unsigned long port, const void *src,
+ unsigned long count)
+{
+ const unsigned short *sp = src;
+
+ while (count--)
+ outw(get_unaligned(sp++), port);
+}
+
+#define outsl outsl
+static inline void outsl(unsigned long port, const void *src,
+ unsigned long count)
+{
+ const unsigned int *sp = src;
+
+ while (count--)
+ outl(get_unaligned(sp++), port);
+}
+
+# ifdef __KERNEL__
+
+extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
+extern void iounmap (volatile void __iomem *addr);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+ return ioremap(phys_addr, size);
+}
+#define ioremap ioremap
+#define ioremap_cache ioremap_cache
+#define ioremap_uc ioremap_uc
+#define iounmap iounmap
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
+extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
+extern void memset_io(volatile void __iomem *s, int c, long n);
+
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+#define memset_io memset_io
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#include <asm-generic/io.h>
+#undef PCI_IOBASE
+
+# endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_IO_H */
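The cookie layout documented above (PIO_OFFSET flagging port cookies, an 8-bit space number over a 24-bit port field) can be taken apart with the same masks the header defines. A small stand-alone sketch of decoding a dense-encoded cookie (illustrative only; the cookie value is hypothetical and sparse encoding is ignored):

    #include <stdio.h>

    #define MAX_IO_SPACES_BITS      8
    #define IO_SPACE_BITS           24
    #define IO_SPACE_SIZE           (1UL << IO_SPACE_BITS)
    #define PIO_OFFSET              (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
    #define PIO_MASK                (PIO_OFFSET - 1)

    int main(void)
    {
            /* Hypothetical cookie: space 2, dense-encoded port 0x3f8. */
            unsigned long cookie = PIO_OFFSET | (2UL << IO_SPACE_BITS) | 0x3f8;

            unsigned long space = (cookie & PIO_MASK) >> IO_SPACE_BITS;
            unsigned long port  = cookie & (IO_SPACE_SIZE - 1);

            printf("PIO cookie 0x%lx -> space %lu, port 0x%lx\n",
                   cookie, space, port);
            return 0;
    }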
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 000000000..eb0db20c9
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_IOMMU_H
+#define _ASM_IA64_IOMMU_H 1
+
+#include <linux/acpi.h>
+
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+
+extern void no_iommu_init(void);
+#ifdef CONFIG_INTEL_IOMMU
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; }
+#else
+#define no_iommu (1)
+#define iommu_detected (0)
+#endif
+
+#endif
diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h
new file mode 100644
index 000000000..cc96116ac
--- /dev/null
+++ b/arch/ia64/include/asm/iommu_table.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_IOMMU_TABLE_H
+#define _ASM_IA64_IOMMU_TABLE_H
+
+#define IOMMU_INIT_POST(_detect)
+
+#endif /* _ASM_IA64_IOMMU_TABLE_H */
diff --git a/arch/ia64/include/asm/iosapic.h b/arch/ia64/include/asm/iosapic.h
new file mode 100644
index 000000000..a91aeb413
--- /dev/null
+++ b/arch/ia64/include/asm/iosapic.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IA64_IOSAPIC_H
+#define __ASM_IA64_IOSAPIC_H
+
+#define IOSAPIC_REG_SELECT 0x0
+#define IOSAPIC_WINDOW 0x10
+#define IOSAPIC_EOI 0x40
+
+#define IOSAPIC_VERSION 0x1
+
+/*
+ * Redirection table entry
+ */
+#define IOSAPIC_RTE_LOW(i) (0x10+i*2)
+#define IOSAPIC_RTE_HIGH(i) (0x11+i*2)
+
+#define IOSAPIC_DEST_SHIFT 16
+
+/*
+ * Delivery mode
+ */
+#define IOSAPIC_DELIVERY_SHIFT 8
+#define IOSAPIC_FIXED 0x0
+#define IOSAPIC_LOWEST_PRIORITY 0x1
+#define IOSAPIC_PMI 0x2
+#define IOSAPIC_NMI 0x4
+#define IOSAPIC_INIT 0x5
+#define IOSAPIC_EXTINT 0x7
+
+/*
+ * Interrupt polarity
+ */
+#define IOSAPIC_POLARITY_SHIFT 13
+#define IOSAPIC_POL_HIGH 0
+#define IOSAPIC_POL_LOW 1
+
+/*
+ * Trigger mode
+ */
+#define IOSAPIC_TRIGGER_SHIFT 15
+#define IOSAPIC_EDGE 0
+#define IOSAPIC_LEVEL 1
+
+/*
+ * Mask bit
+ */
+
+#define IOSAPIC_MASK_SHIFT 16
+#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
+
+#define IOSAPIC_VECTOR_MASK 0xffffff00
+
+#ifndef __ASSEMBLY__
+
+#define NR_IOSAPICS 256
+
+#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init
+#define __iosapic_read __ia64_native_iosapic_read
+#define __iosapic_write __ia64_native_iosapic_write
+#define iosapic_get_irq_chip ia64_native_iosapic_get_irq_chip
+
+extern void __init ia64_native_iosapic_pcat_compat_init(void);
+extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
+
+static inline unsigned int
+__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ return readl(iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void
+__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ writel(val, iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
+{
+ writel(vector, iosapic + IOSAPIC_EOI);
+}
+
+extern void __init iosapic_system_init (int pcat_compat);
+extern int iosapic_init (unsigned long address, unsigned int gsi_base);
+extern int iosapic_remove (unsigned int gsi_base);
+extern int gsi_to_irq (unsigned int gsi);
+extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
+ unsigned long trigger);
+extern void iosapic_unregister_intr (unsigned int irq);
+extern void iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
+ unsigned long polarity,
+ unsigned long trigger);
+extern int __init iosapic_register_platform_intr (u32 int_type,
+ unsigned int gsi,
+ int pmi_vector,
+ u16 eid, u16 id,
+ unsigned long polarity,
+ unsigned long trigger);
+
+#ifdef CONFIG_NUMA
+extern void map_iosapic_to_node (unsigned int, int);
+#endif
+
+# endif /* !__ASSEMBLY__ */
+#endif /* __ASM_IA64_IOSAPIC_H */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
new file mode 100644
index 000000000..5acf52e90
--- /dev/null
+++ b/arch/ia64/include/asm/irq.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_IRQ_H
+#define _ASM_IA64_IRQ_H
+
+/*
+ * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize
+ * 01/20/99 S.Eranian added keyboard interrupt
+ * 02/29/00 D.Mosberger moved most things into hw_irq.h
+ */
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <generated/nr-irqs.h>
+
+static __inline__ int
+irq_canonicalize (int irq)
+{
+ /*
+ * We do the legacy thing here of pretending that irqs < 16
+ * are 8259 irqs. This really shouldn't be necessary at all,
+ * but we keep it here as serial.c still uses it...
+ */
+ return ((irq == 2) ? 9 : irq);
+}
+
+extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+
+int create_irq(void);
+void destroy_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_IRQ_H */
diff --git a/arch/ia64/include/asm/irq_regs.h b/arch/ia64/include/asm/irq_regs.h
new file mode 100644
index 000000000..3dd9c0b70
--- /dev/null
+++ b/arch/ia64/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/arch/ia64/include/asm/irq_remapping.h b/arch/ia64/include/asm/irq_remapping.h
new file mode 100644
index 000000000..547a6e870
--- /dev/null
+++ b/arch/ia64/include/asm/irq_remapping.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IA64_INTR_REMAPPING_H
+#define __IA64_INTR_REMAPPING_H
+#define irq_remapping_enabled 0
+#endif
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
new file mode 100644
index 000000000..1dc30f12e
--- /dev/null
+++ b/arch/ia64/include/asm/irqflags.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IRQ flags defines.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef _ASM_IA64_IRQFLAGS_H
+#define _ASM_IA64_IRQFLAGS_H
+
+#include <asm/pal.h>
+#include <asm/kregs.h>
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+extern unsigned long last_cli_ip;
+static inline void arch_maybe_save_ip(unsigned long flags)
+{
+ if (flags & IA64_PSR_I)
+ last_cli_ip = ia64_getreg(_IA64_REG_IP);
+}
+#else
+#define arch_maybe_save_ip(flags) do {} while (0)
+#endif
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ * write a floating-point register right before reading the PSR
+ * and that writes to PSR.mfl
+ */
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ ia64_stop();
+ return ia64_getreg(_IA64_REG_PSR);
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ ia64_stop();
+ ia64_rsm(IA64_PSR_I);
+ arch_maybe_save_ip(flags);
+ return flags;
+}
+
+static inline void arch_local_irq_disable(void)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+ arch_local_irq_save();
+#else
+ ia64_stop();
+ ia64_rsm(IA64_PSR_I);
+#endif
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ ia64_stop();
+ ia64_ssm(IA64_PSR_I);
+ ia64_srlz_d();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+#ifdef CONFIG_IA64_DEBUG_IRQ
+ unsigned long old_psr = arch_local_save_flags();
+#endif
+ ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
+ arch_maybe_save_ip(old_psr & ~flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (flags & IA64_PSR_I) == 0;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_safe_halt(void)
+{
+ arch_local_irq_enable();
+ ia64_pal_halt_light(); /* PAL_HALT_LIGHT */
+}
+
+
+#endif /* _ASM_IA64_IRQFLAGS_H */
diff --git a/arch/ia64/include/asm/kdebug.h b/arch/ia64/include/asm/kdebug.h
new file mode 100644
index 000000000..4f7e6dc97
--- /dev/null
+++ b/arch/ia64/include/asm/kdebug.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _IA64_KDEBUG_H
+#define _IA64_KDEBUG_H 1
+/*
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adopted from
+ * include/asm-x86_64/kdebug.h
+ *
+ * 2005-Oct Keith Owens <kaos@sgi.com>. Expand notify_die to cover more
+ * events.
+ */
+
+enum die_val {
+ DIE_BREAK = 1,
+ DIE_FAULT,
+ DIE_OOPS,
+ DIE_MACHINE_HALT,
+ DIE_MACHINE_RESTART,
+ DIE_MCA_MONARCH_ENTER,
+ DIE_MCA_MONARCH_PROCESS,
+ DIE_MCA_MONARCH_LEAVE,
+ DIE_MCA_SLAVE_ENTER,
+ DIE_MCA_SLAVE_PROCESS,
+ DIE_MCA_SLAVE_LEAVE,
+ DIE_MCA_RENDZVOUS_ENTER,
+ DIE_MCA_RENDZVOUS_PROCESS,
+ DIE_MCA_RENDZVOUS_LEAVE,
+ DIE_MCA_NEW_TIMEOUT,
+ DIE_INIT_ENTER,
+ DIE_INIT_MONARCH_ENTER,
+ DIE_INIT_MONARCH_PROCESS,
+ DIE_INIT_MONARCH_LEAVE,
+ DIE_INIT_SLAVE_ENTER,
+ DIE_INIT_SLAVE_PROCESS,
+ DIE_INIT_SLAVE_LEAVE,
+ DIE_KDEBUG_ENTER,
+ DIE_KDEBUG_LEAVE,
+ DIE_KDUMP_ENTER,
+ DIE_KDUMP_LEAVE,
+};
+
+#endif
diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h
new file mode 100644
index 000000000..294b1e1eb
--- /dev/null
+++ b/arch/ia64/include/asm/kexec.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_KEXEC_H
+#define _ASM_IA64_KEXEC_H
+
+#include <asm/setup.h>
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+
+#define KEXEC_CONTROL_PAGE_SIZE (8192 + 8192 + 4096)
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_IA_64
+
+#define kexec_flush_icache_page(page) do { \
+ unsigned long page_addr = (unsigned long)page_address(page); \
+ flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
+ } while(0)
+
+extern struct kimage *ia64_kimage;
+extern const unsigned int relocate_new_kernel_size;
+extern void relocate_new_kernel(unsigned long, unsigned long,
+ struct ia64_boot_param *, unsigned long);
+static inline void
+crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
+{
+}
+extern struct resource efi_memmap_res;
+extern struct resource boot_param_res;
+extern void kdump_smp_send_stop(void);
+extern void kdump_smp_send_init(void);
+extern void kexec_disable_iosapic(void);
+extern void crash_save_this_cpu(void);
+struct rsvd_region;
+extern unsigned long kdump_find_rsvd_region(unsigned long size,
+ struct rsvd_region *rsvd_regions, int n);
+extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
+extern int kdump_status[];
+extern atomic_t kdump_cpu_freezed;
+extern atomic_t kdump_in_progress;
+
+#endif /* _ASM_IA64_KEXEC_H */
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
new file mode 100644
index 000000000..5c268cf7c
--- /dev/null
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_KMAP_TYPES_H
+#define _ASM_IA64_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
new file mode 100644
index 000000000..c5cf5e4fb
--- /dev/null
+++ b/arch/ia64/include/asm/kprobes.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+#include <asm-generic/kprobes.h>
+#include <asm/break.h>
+
+#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
+
+#ifdef CONFIG_KPROBES
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */
+#define NOP_M_INST (long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \
+ (0x1L << 12) | /* many */ \
+ (((i1) & 1) << 36) | ((i2) << 13))) /* imm */
+
+typedef union cmp_inst {
+ struct {
+ unsigned long long qp : 6;
+ unsigned long long p1 : 6;
+ unsigned long long c : 1;
+ unsigned long long r2 : 7;
+ unsigned long long r3 : 7;
+ unsigned long long p2 : 6;
+ unsigned long long ta : 1;
+ unsigned long long x2 : 2;
+ unsigned long long tb : 1;
+ unsigned long long opcode : 4;
+ unsigned long long reserved : 23;
+ }f;
+ unsigned long long l;
+} cmp_inst_t;
+
+struct kprobe;
+
+typedef struct _bundle {
+ struct {
+ unsigned long long template : 5;
+ unsigned long long slot0 : 41;
+ unsigned long long slot1_p0 : 64-46;
+ } quad0;
+ struct {
+ unsigned long long slot1_p1 : 41 - (64-46);
+ unsigned long long slot2 : 41;
+ } quad1;
+} __attribute__((__aligned__(16))) bundle_t;
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+#define MAX_PARAM_RSE_SIZE (0x60+0x60/0x3f)
+/* per-cpu kprobe control block */
+#define ARCH_PREV_KPROBE_SZ 2
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long *bsp;
+ unsigned long cfm;
+ atomic_t prev_kprobe_index;
+ struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
+};
+
+#define kretprobe_blacklist_size 0
+
+#define SLOT0_OPCODE_SHIFT (37)
+#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46))
+#define SLOT2_OPCODE_SHIFT (37)
+
+#define INDIRECT_CALL_OPCODE (1)
+#define IP_RELATIVE_CALL_OPCODE (5)
+#define IP_RELATIVE_BRANCH_OPCODE (4)
+#define IP_RELATIVE_PREDICT_OPCODE (7)
+#define LONG_BRANCH_OPCODE (0xC)
+#define LONG_CALL_OPCODE (0xD)
+#define flush_insn_slot(p) do { } while (0)
+
+typedef struct kprobe_opcode {
+ bundle_t bundle;
+} kprobe_opcode_t;
+
+/* Architecture specific copy of original instruction*/
+struct arch_specific_insn {
+ /* copy of the instruction to be emulated */
+ kprobe_opcode_t *insn;
+ #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
+ #define INST_FLAG_FIX_BRANCH_REG 2
+ #define INST_FLAG_BREAK_INST 4
+ #define INST_FLAG_BOOSTABLE 8
+ unsigned long inst_flag;
+ unsigned short target_br_reg;
+ unsigned short slot;
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+#endif /* CONFIG_KPROBES */
+#endif /* _ASM_KPROBES_H */
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
new file mode 100644
index 000000000..44113b75e
--- /dev/null
+++ b/arch/ia64/include/asm/kregs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_KREGS_H
+#define _ASM_IA64_KREGS_H
+
+/*
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * This file defines the kernel register usage convention used by Linux/ia64.
+ */
+
+/*
+ * Kernel registers:
+ */
+#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
+#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
+#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
+#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
+#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
+#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
+#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
+
+#define _IA64_KR_PASTE(x,y) x##y
+#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
+#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n)
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */
+#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
+#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
+
+#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX 64 /* Max number for dynamic use*/
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT 1
+#define IA64_PSR_UP_BIT 2
+#define IA64_PSR_AC_BIT 3
+#define IA64_PSR_MFL_BIT 4
+#define IA64_PSR_MFH_BIT 5
+#define IA64_PSR_IC_BIT 13
+#define IA64_PSR_I_BIT 14
+#define IA64_PSR_PK_BIT 15
+#define IA64_PSR_DT_BIT 17
+#define IA64_PSR_DFL_BIT 18
+#define IA64_PSR_DFH_BIT 19
+#define IA64_PSR_SP_BIT 20
+#define IA64_PSR_PP_BIT 21
+#define IA64_PSR_DI_BIT 22
+#define IA64_PSR_SI_BIT 23
+#define IA64_PSR_DB_BIT 24
+#define IA64_PSR_LP_BIT 25
+#define IA64_PSR_TB_BIT 26
+#define IA64_PSR_RT_BIT 27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL0_BIT 32
+#define IA64_PSR_CPL1_BIT 33
+#define IA64_PSR_IS_BIT 34
+#define IA64_PSR_MC_BIT 35
+#define IA64_PSR_IT_BIT 36
+#define IA64_PSR_ID_BIT 37
+#define IA64_PSR_DA_BIT 38
+#define IA64_PSR_DD_BIT 39
+#define IA64_PSR_SS_BIT 40
+#define IA64_PSR_RI_BIT 41
+#define IA64_PSR_ED_BIT 43
+#define IA64_PSR_BN_BIT 44
+#define IA64_PSR_IA_BIT 45
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+ execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
+ execve(). */
+#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+ IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
+ IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
+
+#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
+#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
+
+/* User mask bits: */
+#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
+#define IA64_DCR_BE_BIT 1 /* big-endian default */
+#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
+#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
+#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
+#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
+#define IA64_DCR_DR_BIT 12 /* defer access right faults */
+#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
+#define IA64_DCR_DD_BIT 14 /* defer debug faults */
+
+#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT 32 /* execute access */
+#define IA64_ISR_W_BIT 33 /* write access */
+#define IA64_ISR_R_BIT 34 /* read access */
+#define IA64_ISR_NA_BIT 35 /* non-access */
+#define IA64_ISR_SP_BIT 36 /* speculative load exception */
+#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
+#define IA64_ISR_CODE_MASK 0xf
+
+#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA 0
+#define IA64_ISR_CODE_FC 1
+#define IA64_ISR_CODE_PROBE 2
+#define IA64_ISR_CODE_TAK 3
+#define IA64_ISR_CODE_LFETCH 4
+#define IA64_ISR_CODE_PROBEF 5
+
+#endif /* _ASM_IA64_KREGS_H */
diff --git a/arch/ia64/include/asm/libata-portmap.h b/arch/ia64/include/asm/libata-portmap.h
new file mode 100644
index 000000000..757f84e5d
--- /dev/null
+++ b/arch/ia64/include/asm/libata-portmap.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_IA64_LIBATA_PORTMAP_H
+#define __ASM_IA64_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_IRQ(dev) isa_irq_to_vector(14)
+
+#define ATA_SECONDARY_IRQ(dev) isa_irq_to_vector(15)
+
+#endif
diff --git a/arch/ia64/include/asm/linkage.h b/arch/ia64/include/asm/linkage.h
new file mode 100644
index 000000000..5178af560
--- /dev/null
+++ b/arch/ia64/include/asm/linkage.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifndef __ASSEMBLY__
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
+
+#else
+
+#include <asm/asmmacro.h>
+
+#endif
+
+#define cond_syscall(x) asm(".weak\t" #x "#\n" #x "#\t=\tsys_ni_syscall#")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ( #alias "# = " #name "#\n\t.globl " #alias "#")
+
+#endif
diff --git a/arch/ia64/include/asm/local.h b/arch/ia64/include/asm/local.h
new file mode 100644
index 000000000..c11c530f7
--- /dev/null
+++ b/arch/ia64/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
new file mode 100644
index 000000000..726df17f1
--- /dev/null
+++ b/arch/ia64/include/asm/mca.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * File: mca.h
+ * Purpose: Machine check handling specific defines
+ *
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
+ */
+
+#ifndef _ASM_IA64_MCA_H
+#define _ASM_IA64_MCA_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include <asm/param.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+#include <asm/mca_asm.h>
+
+#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
+
+typedef struct ia64_fptr {
+ unsigned long fp;
+ unsigned long gp;
+} ia64_fptr_t;
+
+typedef union cmcv_reg_u {
+ u64 cmcv_regval;
+ struct {
+ u64 cmcr_vector : 8;
+ u64 cmcr_reserved1 : 4;
+ u64 cmcr_ignored1 : 1;
+ u64 cmcr_reserved2 : 3;
+ u64 cmcr_mask : 1;
+ u64 cmcr_ignored2 : 47;
+ } cmcv_reg_s;
+
+} cmcv_reg_t;
+
+#define cmcv_mask cmcv_reg_s.cmcr_mask
+#define cmcv_vector cmcv_reg_s.cmcr_vector
+
+enum {
+ IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
+ IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1,
+ IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2,
+ IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA = 0x3,
+};
+
+/* Information maintained by the MC infrastructure */
+typedef struct ia64_mc_info_s {
+ u64 imi_mca_handler;
+ size_t imi_mca_handler_size;
+ u64 imi_monarch_init_handler;
+ size_t imi_monarch_init_handler_size;
+ u64 imi_slave_init_handler;
+ size_t imi_slave_init_handler_size;
+ u8 imi_rendez_checkin[NR_CPUS];
+
+} ia64_mc_info_t;
+
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+
+ /* SAL to OS */
+ unsigned long os_gp; /* GP of the os registered with the SAL, physical */
+ unsigned long pal_proc; /* PAL_PROC entry point, physical */
+ unsigned long sal_proc; /* SAL_PROC entry point, physical */
+ unsigned long rv_rc; /* MCA - Rendezvous state, INIT - reason code */
+ unsigned long proc_state_param; /* from R18 */
+ unsigned long monarch; /* 1 for a monarch event, 0 for a slave */
+
+ /* common */
+ unsigned long sal_ra; /* Return address in SAL, physical */
+ unsigned long sal_gp; /* GP of the SAL - physical */
+ pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
+ /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
+ * Note: if the MCA/INIT recovery code wants to resume to a new context
+ * then it must change these values to reflect the new kernel stack.
+ */
+ unsigned long prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */
+ unsigned long prev_IA64_KR_CURRENT_STACK;
+ struct task_struct *prev_task; /* previous task, NULL if it is not useful */
+ /* Some interrupt registers are not saved in minstate, pt_regs or
+ * switch_stack. Because MCA/INIT can occur when interrupts are
+ * disabled, we need to save the additional interrupt registers over
+ * MCA/INIT and resume.
+ */
+ unsigned long isr;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long iim;
+ unsigned long iha;
+
+ /* OS to SAL */
+ unsigned long os_status; /* OS status to SAL, enum below */
+ unsigned long context; /* 0 if return to same context
+ 1 if return to new context */
+
+ /* I-resources */
+ unsigned long iip;
+ unsigned long ipsr;
+ unsigned long ifs;
+};
+
+enum {
+ IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
+	IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system needed from SAL */
+	IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system needed from SAL */
+ IA64_MCA_HALT = -3 /* System to be halted by SAL */
+};
+
+enum {
+ IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT = -1, /* Warm boot of the system needed from SAL */
+};
+
+enum {
+ IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */
+ IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */
+};
+
+/* Per-CPU MCA state that is too big for normal per-CPU variables. */
+
+struct ia64_mca_cpu {
+ u64 mca_stack[KERNEL_STACK_SIZE/8];
+ u64 init_stack[KERNEL_STACK_SIZE/8];
+};
+
+/* Array of physical addresses of each CPU's MCA area. */
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
+extern int cpe_vector;
+extern int ia64_cpe_irq;
+extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
+extern void ia64_mca_cpu_init(void *);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+ struct switch_stack *,
+ struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_cmc_vector_setup(void);
+extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
+extern void ia64_unreg_MCA_extension(void);
+extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);
+extern void ia64_mca_printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+struct ia64_mca_notify_die {
+ struct ia64_sal_os_state *sos;
+ int *monarch_cpu;
+ int *data;
+};
+
+DECLARE_PER_CPU(u64, ia64_mca_pal_base);
+
+#else /* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT -1 /* Warm boot of the system needed from SAL */
+#define IA64_MCA_COLD_BOOT -2 /* Cold boot of the system needed from SAL */
+#define IA64_MCA_HALT -3 /* System to be halted by SAL */
+
+#define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT -1 /* Warm boot of the system needed from SAL */
+
+#define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_MCA_H */
diff --git a/arch/ia64/include/asm/mca_asm.h b/arch/ia64/include/asm/mca_asm.h
new file mode 100644
index 000000000..e3ab1f41f
--- /dev/null
+++ b/arch/ia64/include/asm/mca_asm.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * File: mca_asm.h
+ * Purpose: Machine check handling specific defines
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#include <asm/percpu.h>
+
+#define PSR_IC 13
+#define PSR_I 14
+#define PSR_DT 17
+#define PSR_RT 27
+#define PSR_MC 35
+#define PSR_IT 36
+#define PSR_BN 44
+
+/*
+ * This macro converts an instruction virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define INST_VA_TO_PA(addr) \
+ dep addr = 0, addr, 61, 3
+/*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr) \
+ tpa addr = addr
+/*
+ * This macro converts a data physical address to a virtual address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Put 0x7 in bits 61 thru 63.
+ */
+#define DATA_PA_TO_VA(addr,temp) \
+ mov temp = 0x7 ;; \
+ dep addr = temp, addr, 61, 3
+
+#define GET_THIS_PADDR(reg, var) \
+ mov reg = IA64_KR(PER_CPU_DATA);; \
+ addl reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ * 1. Save the current psr
+ * 2. Make sure that all the upper 32 bits are off
+ *
+ * 3. Clear the interrupt enable and interrupt state collection bits
+ * in the psr before updating the ipsr and iip.
+ *
+ * 4. Turn off the instruction, data and rse translation bits of the psr
+ * and store the new value into ipsr
+ * Also make sure that the interrupts are disabled.
+ * Ensure that we are in little endian mode.
+ * [psr.{rt, it, dt, i, be} = 0]
+ *
+ * 5. Get the physical address corresponding to the virtual address
+ * of the next instruction bundle and put it in iip.
+ * (Using magic numbers 24 and 40 in the deposit instruction since
+ * the IA64_SDK code directly maps the lower 24 bits as the physical address
+ * from a virtual address).
+ *
+ * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov old_psr = psr; \
+ ;; \
+ dep old_psr = 0, old_psr, 32, 32; \
+ \
+ mov ar.rsc = 0 ; \
+ ;; \
+ srlz.d; \
+ mov temp2 = ar.bspstore; \
+ ;; \
+ DATA_VA_TO_PA(temp2); \
+ ;; \
+ mov temp1 = ar.rnat; \
+ ;; \
+ mov ar.bspstore = temp2; \
+ ;; \
+ mov ar.rnat = temp1; \
+ mov temp1 = psr; \
+ mov temp2 = psr; \
+ ;; \
+ \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr.l = temp2; \
+ ;; \
+ srlz.d; \
+ dep temp1 = 0, temp1, 32, 32; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_I, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_IC, 1; \
+ ;; \
+ dep temp1 = -1, temp1, PSR_MC, 1; \
+ ;; \
+ mov cr.ipsr = temp1; \
+ ;; \
+ LOAD_PHYSICAL(p0, temp2, start_addr); \
+ ;; \
+ mov cr.iip = temp2; \
+ mov cr.ifs = r0; \
+ DATA_VA_TO_PA(sp); \
+ DATA_VA_TO_PA(gp); \
+ ;; \
+ srlz.i; \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ nop 2; \
+ rfi; \
+ ;;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ * 1. Get the old saved psr
+ *
+ * 2. Clear the interrupt state collection bit in the current psr.
+ *
+ * 3. Set the instruction translation bit back in the old psr
+ * Note we have to do this since we are right now saving only the
+ * lower 32 bits of old psr. (Also the old psr has the data and
+ * rse translation bits on)
+ *
+ * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ * 5. Reset the current thread pointer (r13).
+ *
+ * 6. Set iip to the virtual address of the next instruction bundle.
+ *
+ * 7. Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov temp2 = psr; \
+ ;; \
+ mov old_psr = temp2; \
+ ;; \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr.l = temp2; \
+ mov ar.rsc = 0; \
+ ;; \
+ srlz.d; \
+ mov r13 = ar.k6; \
+ mov temp2 = ar.bspstore; \
+ ;; \
+ DATA_PA_TO_VA(temp2,temp1); \
+ ;; \
+ mov temp1 = ar.rnat; \
+ ;; \
+ mov ar.bspstore = temp2; \
+ ;; \
+ mov ar.rnat = temp1; \
+ ;; \
+ mov temp1 = old_psr; \
+ ;; \
+ mov temp2 = 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IC, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_BN, 1; \
+ ;; \
+ \
+ mov cr.ipsr = temp1; \
+ movl temp2 = start_addr; \
+ ;; \
+ mov cr.iip = temp2; \
+ movl gp = __gp \
+ ;; \
+ DATA_PA_TO_VA(sp, temp1); \
+ srlz.i; \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ rfi \
+ ;;
+
+/*
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state. All entries
+ * are 16 byte aligned.
+ *
+ * +---------------------------+
+ * | pt_regs |
+ * +---------------------------+
+ * | switch_stack |
+ * +---------------------------+
+ * | SAL/OS state |
+ * +---------------------------+
+ * | 16 byte scratch area |
+ * +---------------------------+ <-------- SP at start of C MCA handler
+ * | ..... |
+ * +---------------------------+
+ * | RBS for MCA/INIT handler |
+ * +---------------------------+
+ * | struct task for MCA/INIT |
+ * +---------------------------+ <-------- Bottom of MCA/INIT stack
+ */
+
+#define ALIGN16(x) ((x)&~15)
+#define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16)
+
+#endif /* _ASM_IA64_MCA_ASM_H */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
new file mode 100644
index 000000000..092f1c91b
--- /dev/null
+++ b/arch/ia64/include/asm/meminit.h
@@ -0,0 +1,74 @@
+#ifndef meminit_h
+#define meminit_h
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+
+/*
+ * Entries defined so far:
+ * - boot param structure itself
+ * - memory map
+ * - initrd (optional)
+ * - command line string
+ * - kernel code & data
+ * - crash dumping code reserved region
+ * - Kernel memory map built from EFI memory map
+ * - ELF core header
+ *
+ * More could be added if necessary
+ */
+#define IA64_MAX_RSVD_REGIONS 9
+
+struct rsvd_region {
+ u64 start; /* virtual address of beginning of element */
+ u64 end; /* virtual address of end of element + 1 */
+};
+
+extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+extern int num_rsvd_regions;
+
+extern void find_memory (void);
+extern void reserve_memory (void);
+extern void find_initrd (void);
+extern int filter_rsvd_memory (u64 start, u64 end, void *arg);
+extern int filter_memory (u64 start, u64 end, void *arg);
+extern unsigned long efi_memmap_init(u64 *s, u64 *e);
+extern int find_max_min_low_pfn (u64, u64, void *);
+
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(u64 *start, u64 *end);
+
+/*
+ * For rounding an address to the next IA64_GRANULE_SIZE or order
+ */
+#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
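+
+/*
+ * For illustration, assuming a 16MB granule (IA64_GRANULE_SIZE == 0x1000000):
+ * GRANULEROUNDDOWN(0x1234567) == 0x1000000 and
+ * GRANULEROUNDUP(0x1000001) == 0x2000000.
+ */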
+
+#ifdef CONFIG_NUMA
+ extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+#else
+# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
+#endif
+
+#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+
+extern int register_active_ranges(u64 start, u64 len, int nid);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is larger than this */
+ extern unsigned long VMALLOC_END;
+ extern struct page *vmem_map;
+ extern int find_largest_hole(u64 start, u64 end, void *arg);
+ extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
+ extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ return i + 1;
+}
+#endif
+#endif /* meminit_h */
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
new file mode 100644
index 000000000..15cf100ad
--- /dev/null
+++ b/arch/ia64/include/asm/mman.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_MMAN_H
+#define _ASM_IA64_MMAN_H
+
+#include <uapi/asm/mman.h>
+
+#ifndef __ASSEMBLY__
+#define arch_mmap_check ia64_mmap_check
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags);
+#endif
+#endif /* _ASM_IA64_MMAN_H */
diff --git a/arch/ia64/include/asm/mmiowb.h b/arch/ia64/include/asm/mmiowb.h
new file mode 100644
index 000000000..d67aab4ea
--- /dev/null
+++ b/arch/ia64/include/asm/mmiowb.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_IA64_MMIOWB_H
+#define _ASM_IA64_MMIOWB_H
+
+/**
+ * mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes. For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ */
+#define mmiowb() ia64_mfa()
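+
+/*
+ * Typical usage sketch (dev_lock, val and ioaddr are placeholders): a driver
+ * that writes device registers under a lock issues mmiowb() before releasing
+ * the lock, so that the MMIO writes reach the device before another CPU can
+ * take the lock and issue its own writes:
+ *
+ *	spin_lock(&dev_lock);
+ *	writel(val, ioaddr);
+ *	mmiowb();
+ *	spin_unlock(&dev_lock);
+ */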
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_IA64_MMIOWB_H */
diff --git a/arch/ia64/include/asm/mmu.h b/arch/ia64/include/asm/mmu.h
new file mode 100644
index 000000000..f75f44f53
--- /dev/null
+++ b/arch/ia64/include/asm/mmu.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MMU_H
+#define __MMU_H
+
+/*
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
+ */
+typedef volatile unsigned long mm_context_t;
+
+typedef unsigned long nv_mm_context_t;
+
+#endif
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
new file mode 100644
index 000000000..2da0e2eb0
--- /dev/null
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_MMU_CONTEXT_H
+#define _ASM_IA64_MMU_CONTEXT_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * Routines to manage the allocation of task context numbers. Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches. Context numbers are implemented using ia-64
+ * region ids. Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process. We use the least significant three bits in a region
+ * id for this purpose.
+ */
+
+#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
+
+#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
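+
+/*
+ * For example, with context number 0x10 and an address in region 3
+ * (addr >> 61 == 3), ia64_rid(0x10, addr) == (0x10 << 3) | 3 == 0x83.
+ */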
+
+# include <asm/page.h>
+# ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/mm_types.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm-generic/mm_hooks.h>
+
+struct ia64_ctx {
+ spinlock_t lock;
+ unsigned int next; /* next context number to use */
+ unsigned int limit; /* available free range */
+ unsigned int max_ctx; /* max. context value supported by all CPUs */
+ /* call wrap_mmu_context when next >= max */
+ unsigned long *bitmap; /* bitmap size is max_ctx+1 */
+ unsigned long *flushmap;/* pending rid to be flushed */
+};
+
+extern struct ia64_ctx ia64_ctx;
+DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
+
+extern void mmu_context_init (void);
+extern void wrap_mmu_context (struct mm_struct *mm);
+
+static inline void
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+ extern void local_flush_tlb_all (void);
+ unsigned long flags;
+
+ if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+ local_flush_tlb_all();
+ __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+ }
+}
+
+static inline nv_mm_context_t
+get_mmu_context (struct mm_struct *mm)
+{
+ unsigned long flags;
+ nv_mm_context_t context = mm->context;
+
+ if (likely(context))
+ goto out;
+
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ /* re-check, now that we've got the lock: */
+ context = mm->context;
+ if (context == 0) {
+ cpumask_clear(mm_cpumask(mm));
+ if (ia64_ctx.next >= ia64_ctx.limit) {
+ ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ if (ia64_ctx.next >= ia64_ctx.max_ctx)
+ wrap_mmu_context(mm);
+ }
+ mm->context = context = ia64_ctx.next++;
+ __set_bit(context, ia64_ctx.bitmap);
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
+ /*
+ * Ensure we're not starting to use "context" before any old
+ * uses of it are gone from our TLB.
+ */
+ delayed_tlb_flush();
+
+ return context;
+}
+
+/*
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
+ */
+static inline int
+init_new_context (struct task_struct *p, struct mm_struct *mm)
+{
+ mm->context = 0;
+ return 0;
+}
+
+static inline void
+destroy_context (struct mm_struct *mm)
+{
+ /* Nothing to do. */
+}
+
+static inline void
+reload_context (nv_mm_context_t context)
+{
+ unsigned long rid;
+ unsigned long rid_incr = 0;
+ unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
+
+ old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
+ rid = context << 3; /* make space for encoding the region number */
+ rid_incr = 1 << 8;
+
+ /* encode the region id, preferred page size, and VHPT enable bit: */
+ rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
+ rr1 = rr0 + 1*rid_incr;
+ rr2 = rr0 + 2*rid_incr;
+ rr3 = rr0 + 3*rid_incr;
+ rr4 = rr0 + 4*rid_incr;
+#ifdef CONFIG_HUGETLB_PAGE
+ rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+# if RGN_HPAGE != 4
+# error "reload_context assumes RGN_HPAGE is 4"
+# endif
+#endif
+
+ ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
+/*
+ * Must be called with preemption off
+ */
+static inline void
+activate_context (struct mm_struct *mm)
+{
+ nv_mm_context_t context;
+
+ do {
+ context = get_mmu_context(mm);
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ reload_context(context);
+ /*
+ * in the unlikely event of a TLB-flush by another thread,
+ * redo the load.
+ */
+ } while (unlikely(context != mm->context));
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+/*
+ * Switch from address space PREV to address space NEXT.
+ */
+static inline void
+activate_mm (struct mm_struct *prev, struct mm_struct *next)
+{
+ /*
+ * We may get interrupts here, but that's OK because interrupt
+ * handlers cannot touch user-space.
+ */
+ ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
+ activate_context(next);
+}
+
+#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
+
+# endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_IA64_MMU_CONTEXT_H */
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
new file mode 100644
index 000000000..767201f66
--- /dev/null
+++ b/arch/ia64/include/asm/mmzone.h
@@ -0,0 +1,35 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000,2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_MMZONE_H
+#define _ASM_IA64_MMZONE_H
+
+#include <linux/numa.h>
+#include <asm/page.h>
+#include <asm/meminit.h>
+
+#ifdef CONFIG_NUMA
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+ extern int paddr_to_nid(unsigned long);
+ int nid = paddr_to_nid(pfn << PAGE_SHIFT);
+ if (nid < 0)
+ return 0;
+ else
+ return nid;
+}
+
+#define MAX_PHYSNODE_ID 2048
+#endif /* CONFIG_NUMA */
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
+
+#endif /* _ASM_IA64_MMZONE_H */
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
new file mode 100644
index 000000000..7271b9c5f
--- /dev/null
+++ b/arch/ia64/include/asm/module.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_MODULE_H
+#define _ASM_IA64_MODULE_H
+
+#include <asm-generic/module.h>
+
+/*
+ * IA-64-specific support for kernel module loader.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+struct elf64_shdr; /* forward declaration */
+
+struct mod_arch_specific {
+ /* Used only at module load time. */
+ struct elf64_shdr *core_plt; /* core PLT section */
+ struct elf64_shdr *init_plt; /* init PLT section */
+ struct elf64_shdr *got; /* global offset table */
+ struct elf64_shdr *opd; /* official procedure descriptors */
+ struct elf64_shdr *unwind; /* unwind-table section */
+ unsigned long gp; /* global-pointer for module */
+ unsigned int next_got_entry; /* index of next available got entry */
+
+ /* Used at module run and cleanup time. */
+ void *core_unw_table; /* core unwind-table cookie returned by unwinder */
+ void *init_unw_table; /* init unwind-table cookie returned by unwinder */
+ void *opd_addr; /* symbolize uses .opd to get to actual function */
+ unsigned long opd_size;
+};
+
+#define ARCH_SHF_SMALL SHF_IA_64_SHORT
+
+#endif /* _ASM_IA64_MODULE_H */
diff --git a/arch/ia64/include/asm/module.lds.h b/arch/ia64/include/asm/module.lds.h
new file mode 100644
index 000000000..eff68f362
--- /dev/null
+++ b/arch/ia64/include/asm/module.lds.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+SECTIONS {
+ /* Group unwind sections into a single section: */
+ .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
+ .IA_64.unwind : { *(.IA_64.unwind*) }
+ /*
+ * Create place-holder sections to hold the PLTs, GOT, and
+ * official procedure-descriptors (.opd).
+ */
+ .core.plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .opd : { BYTE(0) }
+}
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 000000000..18d0e4226
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK 0xffffff00
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
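+
+/*
+ * An MSI data value might be composed, for illustration, as:
+ *
+ *	MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
+ *	MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector)
+ *
+ * where "vector" is the interrupt vector to deliver.
+ */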
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_DEST_ID_SHIFT 4
+#define MSI_ADDR_HEADER 0xfee00000
+
+#define MSI_ADDR_DEST_ID_MASK 0xfff0000f
+#define MSI_ADDR_DEST_ID_CPU(cpu) ((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define MSI_ADDR_DEST_MODE_SHIFT 2
+#define MSI_ADDR_DEST_MODE_PHYS (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGIC (1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif /* _IA64_MSI_DEF_H */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
new file mode 100644
index 000000000..e08662396
--- /dev/null
+++ b/arch/ia64/include/asm/native/inst.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ */
+
+#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN
+
+#define MOV_FROM_IFA(reg) \
+ mov reg = cr.ifa
+
+#define MOV_FROM_ITIR(reg) \
+ mov reg = cr.itir
+
+#define MOV_FROM_ISR(reg) \
+ mov reg = cr.isr
+
+#define MOV_FROM_IHA(reg) \
+ mov reg = cr.iha
+
+#define MOV_FROM_IPSR(pred, reg) \
+(pred) mov reg = cr.ipsr
+
+#define MOV_FROM_IIM(reg) \
+ mov reg = cr.iim
+
+#define MOV_FROM_IIP(reg) \
+ mov reg = cr.iip
+
+#define MOV_FROM_IVR(reg, clob) \
+ mov reg = cr.ivr
+
+#define MOV_FROM_PSR(pred, reg, clob) \
+(pred) mov reg = psr
+
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+(pred) mov reg = ar.itc
+
+#define MOV_TO_IFA(reg, clob) \
+ mov cr.ifa = reg
+
+#define MOV_TO_ITIR(pred, reg, clob) \
+(pred) mov cr.itir = reg
+
+#define MOV_TO_IHA(pred, reg, clob) \
+(pred) mov cr.iha = reg
+
+#define MOV_TO_IPSR(pred, reg, clob) \
+(pred) mov cr.ipsr = reg
+
+#define MOV_TO_IFS(pred, reg, clob) \
+(pred) mov cr.ifs = reg
+
+#define MOV_TO_IIP(reg, clob) \
+ mov cr.iip = reg
+
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+ mov IA64_KR(kr) = reg
+
+#define ITC_I(pred, reg, clob) \
+(pred) itc.i reg
+
+#define ITC_D(pred, reg, clob) \
+(pred) itc.d reg
+
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+(pred_i) itc.i reg; \
+(pred_d) itc.d reg
+
+#define THASH(pred, reg0, reg1, clob) \
+(pred) thash reg0 = reg1
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
+ ssm psr.ic | PSR_DEFAULT_BITS \
+ ;; \
+	srlz.i /* guarantee that interruption collection is on */ \
+ ;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
+ ssm psr.ic \
+ ;; \
+ srlz.d
+
+#define RSM_PSR_IC(clob) \
+ rsm psr.ic
+
+#define SSM_PSR_I(pred, pred_clob, clob) \
+(pred) ssm psr.i
+
+#define RSM_PSR_I(pred, clob0, clob1) \
+(pred) rsm psr.i
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2) \
+ rsm psr.i | psr.ic
+
+#define RSM_PSR_DT \
+ rsm psr.dt
+
+#define RSM_PSR_BE_I(clob0, clob1) \
+ rsm psr.be | psr.i
+
+#define SSM_PSR_DT_AND_SRLZ_I \
+ ssm psr.dt \
+ ;; \
+ srlz.i
+
+#define BSW_0(clob0, clob1, clob2) \
+ bsw.0
+
+#define BSW_1(clob0, clob1) \
+ bsw.1
+
+#define COVER \
+ cover
+
+#define RFI \
+ rfi
diff --git a/arch/ia64/include/asm/native/irq.h b/arch/ia64/include/asm/native/irq.h
new file mode 100644
index 000000000..aa74915f8
--- /dev/null
+++ b/arch/ia64/include/asm/native/irq.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/******************************************************************************
+ * arch/ia64/include/asm/native/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ */
+
+#ifndef _ASM_IA64_NATIVE_IRQ_H
+#define _ASM_IA64_NATIVE_IRQ_H
+
+#define NR_VECTORS 256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
+#else
+#define IA64_NATIVE_NR_IRQS 1024
+#endif
+
+#endif /* _ASM_IA64_NATIVE_IRQ_H */
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 000000000..f13e76757
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/******************************************************************************
+ * arch/ia64/include/asm/native/patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist \
+ __ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist \
+ __ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
+ __ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
+ __ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist \
+ __ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist \
+ __ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist \
+ __ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist \
+ __ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/nodedata.h b/arch/ia64/include/asm/nodedata.h
new file mode 100644
index 000000000..2fb337b0e
--- /dev/null
+++ b/arch/ia64/include/asm/nodedata.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_NODEDATA_H
+#define _ASM_IA64_NODEDATA_H
+
+#include <linux/numa.h>
+
+#include <asm/percpu.h>
+#include <asm/mmzone.h>
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Node Data. One of these structures is located on each node of a NUMA system.
+ */
+
+struct pglist_data;
+struct ia64_node_data {
+ short active_cpu_count;
+ short node;
+ struct pglist_data *pg_data_ptrs[MAX_NUMNODES];
+};
+
+
+/*
+ * Return a pointer to the node_data structure for the executing cpu.
+ */
+#define local_node_data (local_cpu_data->node_data)
+
+/*
+ * Given a node id, return a pointer to the pg_data_t for the node.
+ *
+ * NODE_DATA - should be used in all code not related to system
+ * initialization. It uses pernode data structures to minimize
+ * offnode memory references. However, these structures are not
+ * present during boot. This macro can be used once cpu_init
+ * completes.
+ */
+#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
+
+/*
+ * LOCAL_DATA_ADDR - This is to calculate the address of other node's
+ * "local_node_data" at hot-plug phase. The local_node_data
+ * is pointed to by per_cpu_page. The kernel usually uses it
+ * only for the executing cpu. However, when a new node is
+ * hot-added, the addresses of the local data for the other
+ * nodes are needed in order to update all of them.
+ */
+#define LOCAL_DATA_ADDR(pgdat) \
+ ((struct ia64_node_data *)((u64)(pgdat) + \
+ L1_CACHE_ALIGN(sizeof(struct pglist_data))))
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
new file mode 100644
index 000000000..c5c253cb9
--- /dev/null
+++ b/arch/ia64/include/asm/numa.h
@@ -0,0 +1,83 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains NUMA specific prototypes and definitions.
+ *
+ * 2002/08/05 Erich Focht <efocht@ess.nec.de>
+ *
+ */
+#ifndef _ASM_IA64_NUMA_H
+#define _ASM_IA64_NUMA_H
+
+
+#ifdef CONFIG_NUMA
+
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/mmzone.h>
+
+extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+extern pg_data_t *pgdat_list[MAX_NUMNODES];
+
+/* Stuff below this line could be architecture independent */
+
+extern int num_node_memblks; /* total number of memory chunks */
+
+/*
+ * List of node memory chunks. Filled when parsing SRAT table to
+ * obtain information about memory nodes.
+*/
+
+struct node_memblk_s {
+ unsigned long start_paddr;
+ unsigned long size;
+ int nid; /* which logical node contains this chunk? */
+ int bank; /* which mem bank on this node */
+};
+
+struct node_cpuid_s {
+ u16 phys_id; /* id << 8 | eid */
+ int nid; /* logical node containing this CPU */
+};
+
+extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
+extern struct node_cpuid_s node_cpuid[NR_CPUS];
+
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ *
+ * This is a matrix with "distances" between nodes; they should be
+ * proportional to the memory access latency ratios.
+ */
+
+extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
+#define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
+extern int __node_distance(int from, int to);
+#define node_distance(from,to) __node_distance(from, to)
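+
+/*
+ * numa_slit is a flattened MAX_NUMNODES x MAX_NUMNODES matrix; e.g.
+ * slit_distance(1, 2) reads numa_slit[1 * MAX_NUMNODES + 2]. In an ACPI
+ * SLIT the distance from a node to itself is 10 (LOCAL_DISTANCE).
+ */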
+
+extern int paddr_to_nid(unsigned long paddr);
+
+#define local_nodeid (cpu_to_node_map[smp_processor_id()])
+
+#define numa_off 0
+
+extern void map_cpu_to_node(int cpu, int nid);
+extern void unmap_cpu_from_node(int cpu, int nid);
+extern void numa_clear_node(int cpu);
+
+#else /* !CONFIG_NUMA */
+#define map_cpu_to_node(cpu, nid) do{}while(0)
+#define unmap_cpu_from_node(cpu, nid) do{}while(0)
+#define paddr_to_nid(addr) 0
+#define numa_clear_node(cpu) do { } while (0)
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
new file mode 100644
index 000000000..b69a5499d
--- /dev/null
+++ b/arch/ia64/include/asm/page.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/types.h>
+
+/*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT (61)
+#define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS (RGN_BASE(-1))
+
+#define RGN_KERNEL 7 /* Identity mapped region */
+#define RGN_UNCACHED 6 /* Identity mapped I/O region */
+#define RGN_GATE 5 /* Gate page, Kernel text, etc */
+#define RGN_HPAGE 4 /* For Huge TLB pages */
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT 12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT 13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT 14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT 16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define PERCPU_PAGE_SHIFT 18 /* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
+# define HPAGE_SHIFT hpage_shift
+# define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
+# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
+# define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef __ASSEMBLY__
+# define __pa(x) ((x) - PAGE_OFFSET)
+# define __va(x) ((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY */
+# define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+/*
+ * clear_user_page() and copy_user_page() can't be inline functions because
+ * flush_dcache_page() can't be defined until later...
+ */
+#define clear_user_page(addr, vaddr, page) \
+do { \
+ clear_page(addr); \
+ flush_dcache_page(page); \
+} while (0)
+
+#define copy_user_page(to, from, vaddr, page) \
+do { \
+ copy_page((to), (from)); \
+ flush_dcache_page(page); \
+} while (0)
+
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+({ \
+ struct page *page = alloc_page_vma( \
+ GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ if (page) \
+ flush_dcache_page(page); \
+ page; \
+})
+
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern int ia64_pfn_valid (unsigned long pfn);
+#else
+# define ia64_pfn_valid(pfn) 1
+#endif
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern struct page *vmem_map;
+#ifdef CONFIG_DISCONTIGMEM
+# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn) (vmem_map + (pfn))
+# define __pfn_to_phys(pfn) PFN_PHYS(pfn)
+#else
+# include <asm-generic/memory_model.h>
+#endif
+#else
+# include <asm-generic/memory_model.h>
+#endif
+
+#ifdef CONFIG_FLATMEM
+# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+#elif defined(CONFIG_DISCONTIGMEM)
+extern unsigned long min_low_pfn;
+extern unsigned long max_low_pfn;
+# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+#endif
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+typedef union ia64_va {
+ struct {
+ unsigned long off : 61; /* intra-region offset */
+ unsigned long reg : 3; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero. They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
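+
+/*
+ * For example, for the identity-mapped kernel address 0xe000000000100000
+ * (region RGN_KERNEL == 7): REGION_NUMBER() yields 7, __pa() yields
+ * 0x100000, and __va(0x100000) maps back to 0xe000000000100000.
+ */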
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
+ | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
+# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+extern unsigned int hpage_shift;
+#endif
+
+static __inline__ int
+get_order (unsigned long size)
+{
+ long double d = size - 1;
+ long order;
+
+ order = ia64_getf_exp(d);
+ order = order - PAGE_SHIFT - 0xffff + 1;
+ if (order < 0)
+ order = 0;
+ return order;
+}
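+
+/*
+ * For instance, with 16KB pages (PAGE_SHIFT == 14), get_order(PAGE_SIZE)
+ * returns 0 and get_order(0x10000) returns 2 (i.e. four pages).
+ */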
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef STRICT_MM_TYPECHECKS
+ /*
+ * These are used to make use of C type-checking..
+ */
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long pmd; } pmd_t;
+#if CONFIG_PGTABLE_LEVELS == 4
+ typedef struct { unsigned long pud; } pud_t;
+#endif
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef struct page *pgtable_t;
+
+# define pte_val(x) ((x).pte)
+# define pmd_val(x) ((x).pmd)
+#if CONFIG_PGTABLE_LEVELS == 4
+# define pud_val(x) ((x).pud)
+#endif
+# define pgd_val(x) ((x).pgd)
+# define pgprot_val(x) ((x).pgprot)
+
+# define __pte(x) ((pte_t) { (x) } )
+# define __pmd(x) ((pmd_t) { (x) } )
+# define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+ /*
+ * .. while these make it easier on the compiler
+ */
+# ifndef __ASSEMBLY__
+ typedef unsigned long pte_t;
+ typedef unsigned long pmd_t;
+ typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+ typedef struct page *pgtable_t;
+# endif
+
+# define pte_val(x) (x)
+# define pmd_val(x) (x)
+# define pgd_val(x) (x)
+# define pgprot_val(x) (x)
+
+# define __pte(x) (x)
+# define __pgd(x) (x)
+# define __pgprot(x) (x)
+#endif /* !STRICT_MM_TYPECHECKS */
+
+#define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+
+#define GATE_ADDR RGN_BASE(RGN_GATE)
+
+/*
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
+#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
+#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
+#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
+
+#define __HAVE_ARCH_GATE_AREA 1
+
+#endif /* _ASM_IA64_PAGE_H */
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
new file mode 100644
index 000000000..f9d2b3b2d
--- /dev/null
+++ b/arch/ia64/include/asm/pal.h
@@ -0,0 +1,1826 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
+ *
+ * 99/10/01 davidm Make sure we pass zero for reserved parameters.
+ * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
+ * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
+ * 00/05/25 eranian Support for stack calls, and static physical calls
+ * 00/06/18 eranian Support for stacked physical calls
+ * 06/10/26 rja Support for Intel Itanium Architecture Software Developer's
+ * Manual Rev 2.2 (Jan 2006)
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH 1 /* flush i/d cache */
+#define PAL_CACHE_INFO 2 /* get detailed i/d cache info */
+#define PAL_CACHE_INIT 3 /* initialize i/d cache */
+#define PAL_CACHE_SUMMARY 4 /* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB 5 /* list supported memory attributes */
+#define PAL_PTCE_INFO 6 /* purge TLB info */
+#define PAL_VM_INFO 7 /* return supported virtual memory features */
+#define PAL_VM_SUMMARY 8 /* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES 9 /* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES 10 /* set processor bus features */
+#define PAL_DEBUG_INFO 11 /* get number of debug registers */
+#define PAL_FIXED_ADDR 12 /* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE 13 /* base frequency of the platform */
+#define PAL_FREQ_RATIOS 14 /* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO 15 /* return performance monitor info */
+#define PAL_PLATFORM_ADDR 16 /* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES 17 /* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES 18 /* enable/disable configurable processor features */
+#define PAL_RSE_INFO 19 /* return rse information */
+#define PAL_VERSION 20 /* return version of PAL code */
+#define PAL_MC_CLEAR_LOG 21 /* clear all processor log info */
+#define PAL_MC_DRAIN 22 /* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED 23 /* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE 24 /* get processor dynamic state */
+#define PAL_MC_ERROR_INFO 25 /* get processor MCA info and static state */
+#define PAL_MC_RESUME 26 /* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM 27 /* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT 28 /* enter the low power HALT state */
+#define PAL_HALT_LIGHT 29 /* enter the low power light halt state*/
+#define PAL_COPY_INFO 30 /* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT 31 /* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT 32 /* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV 33 /* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST 37 /* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */
+#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
+#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
+#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
+#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
+#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
+#define PAL_VP_INFO 50 /* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
+
+#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
+#define PAL_TEST_PROC 258 /* perform late processor self-test */
+#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ 261 /* read contents of translation register */
+#define PAL_GET_PSTATE 262 /* get the current P-state */
+#define PAL_SET_PSTATE 263 /* set the P-state */
+#define PAL_BRAND_INFO 274 /* Processor branding information */
+
+#define PAL_GET_PSTATE_TYPE_LASTSET 0
+#define PAL_GET_PSTATE_TYPE_AVGANDRESET 1
+#define PAL_GET_PSTATE_TYPE_AVGNORESET 2
+#define PAL_GET_PSTATE_TYPE_INSTANT 3
+
+#define PAL_MC_ERROR_INJECT 276 /* Injects processor error or returns injection capabilities */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64 pal_status_t;
+
+#define PAL_STATUS_SUCCESS 0 /* No error */
+#define PAL_STATUS_UNIMPLEMENTED (-1) /* Unimplemented procedure */
+#define PAL_STATUS_EINVAL (-2) /* Invalid argument */
+#define PAL_STATUS_ERROR (-3) /* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL (-4) /* Could not initialize the
+ * specified level and type of
+ * cache without side effects
+ * and "restrict" was 1
+ */
+#define PAL_STATUS_REQUIRES_MEMORY (-9) /* Call requires PAL memory buffer */
+
+/* Processor cache level in the hierarchy */
+typedef u64 pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0 0 /* L0 */
+#define PAL_CACHE_LEVEL_L1 1 /* L1 */
+#define PAL_CACHE_LEVEL_L2 2 /* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64 pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */
+#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */
+#define PAL_CACHE_FLUSH_CHK_INTRS 2 /* check for interrupts/mc while flushing */
+
+/* Processor cache line size in bytes */
+typedef int pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64 pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
+
+typedef struct pal_freq_ratio {
+	u32 den, num; /* denominator & numerator */
+} itc_ratio, proc_ratio;
+
+typedef union pal_cache_config_info_1_s {
+ struct {
+ u64 u : 1, /* 0 Unified cache ? */
+ at : 2, /* 2-1 Cache mem attr*/
+ reserved : 5, /* 7-3 Reserved */
+ associativity : 8, /* 16-8 Associativity*/
+ line_size : 8, /* 23-17 Line size */
+ stride : 8, /* 31-24 Stride */
+ store_latency : 8, /*39-32 Store latency*/
+ load_latency : 8, /* 47-40 Load latency*/
+ store_hints : 8, /* 55-48 Store hints*/
+ load_hints : 8; /* 63-56 Load hints */
+ } pcci1_bits;
+ u64 pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef union pal_cache_config_info_2_s {
+ struct {
+ u32 cache_size; /*cache size in bytes*/
+
+
+ u32 alias_boundary : 8, /* 39-32 aliased addr
+ * separation for max
+ * performance.
+ */
+ tag_ls_bit : 8, /* 47-40 LSb of addr*/
+ tag_ms_bit : 8, /* 55-48 MSb of addr*/
+ reserved : 8; /* 63-56 Reserved */
+ } pcci2_bits;
+ u64 pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+ pal_status_t pcci_status;
+ pal_cache_config_info_1_t pcci_info_1;
+ pal_cache_config_info_2_t pcci_info_2;
+ u64 pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride pcci_info_1.pcci1_bits.stride
+#define pcci_line_size pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr pcci_info_1.pcci1_bits.at
+#define pcci_unified pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT 0 /* Write through cache */
+#define PAL_CACHE_ATTR_WB 1 /* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write
+ * back depending on TLB
+ * memory attributes
+ */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */
+
+/* Processor cache protection information */
+typedef union pal_cache_protection_element_u {
+ u32 pcpi_data;
+ struct {
+ u32 data_bits : 8, /* # data bits covered by
+ * each unit of protection
+ */
+
+ tagprot_lsb : 6, /* Least -do- */
+ tagprot_msb : 6, /* Most Sig. tag address
+ * bit that this
+ * protection covers.
+ */
+ prot_bits : 6, /* # of protection bits */
+ method : 4, /* Protection method */
+ t_d : 2; /* Indicates which part
+ * of the cache this
+ * protection encoding
+ * applies.
+ */
+ } pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part pcp_info.t_d
+#define pcpi_prot_method pcp_info.method
+#define pcpi_prot_bits pcp_info.prot_bits
+#define pcpi_tagprot_msb pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb pcp_info.tagprot_lsb
+#define pcpi_data_bits pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */
+#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_MAX 6
+
+
+typedef struct pal_cache_protection_info_s {
+ pal_status_t pcpi_status;
+ pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+ u64 pclid_data;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ reserved : 32; /* 63-32 is reserved*/
+ } pclid_info_read;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ mesi : 8, /* 39-32 cache line
+ * state
+ */
+ start : 8, /* 47-40 lsb of data to
+ * invert
+ */
+ length : 8, /* 55-48 #bits to
+ * invert
+ */
+ trigger : 8; /* 63-56 Trigger error
+ * by doing a load
+ * after the write
+ */
+
+ } pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part pclid_info_read.part
+#define pclid_read_way pclid_info_read.way
+#define pclid_read_level pclid_info_read.level
+#define pclid_read_cache_type pclid_info_read.cache_type
+
+#define pclid_write_trigger pclid_info_write.trigger
+#define pclid_write_length pclid_info_write.length
+#define pclid_write_start pclid_info_write.start
+#define pclid_write_mesi pclid_info_write.mesi
+#define pclid_write_part pclid_info_write.part
+#define pclid_write_way pclid_info_write.way
+#define pclid_write_level pclid_info_write.level
+#define pclid_write_cache_type pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag
+ * protection
+ */
+typedef struct pal_cache_line_info_s {
+ pal_status_t pcli_status; /* Return status of the read cache line
+ * info call.
+ */
+ u64 pcli_data; /* 64-bit data, tag, protection bits .. */
+ u64 pcli_data_len; /* data length in bits */
+ pal_cache_line_state_t pcli_cache_line_state; /* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine Check related definitions */
+
+/* Pending event status bits */
+typedef u64 pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA (1 << 0)
+#define PAL_MC_PENDING_INIT (1 << 1)
+
+/* Error information type */
+typedef u64 pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR 0 /* Processor */
+#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */
+#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */
+#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */
+#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation
+ * dependent
+ */
+
+#define PAL_TLB_CHECK_OP_PURGE 8
+
+typedef struct pal_process_state_info_s {
+ u64 reserved1 : 2,
+ rz : 1, /* PAL_CHECK processor
+ * rendezvous
+ * successful.
+ */
+
+ ra : 1, /* PAL_CHECK attempted
+ * a rendezvous.
+ */
+ me : 1, /* Distinct multiple
+ * errors occurred
+ */
+
+ mn : 1, /* Min. state save
+ * area has been
+ * registered with PAL
+ */
+
+ sy : 1, /* Storage integrity
+ * synched
+ */
+
+
+ co : 1, /* Continuable */
+ ci : 1, /* MC isolated */
+ us : 1, /* Uncontained storage
+ * damage.
+ */
+
+
+ hd : 1, /* Non-essential hw
+ * lost (no loss of
+ * functionality)
+ * causing the
+ * processor to run in
+ * degraded mode.
+ */
+
+ tl : 1, /* 1 => MC occurred
+ * after an instr was
+ * executed but before
+ * the trap that
+ * resulted from instr
+ * execution was
+ * generated.
+ * (Trap Lost )
+ */
+ mi : 1, /* More information available
+ * call PAL_MC_ERROR_INFO
+ */
+ pi : 1, /* Precise instruction pointer */
+ pm : 1, /* Precise min-state save area */
+
+ dy : 1, /* Processor dynamic
+ * state valid
+ */
+
+
+ in : 1, /* 0 = MC, 1 = INIT */
+ rs : 1, /* RSE valid */
+ cm : 1, /* MC corrected */
+ ex : 1, /* MC is expected */
+ cr : 1, /* Control regs valid*/
+ pc : 1, /* Perf cntrs valid */
+ dr : 1, /* Debug regs valid */
+ tr : 1, /* Translation regs
+ * valid
+ */
+ rr : 1, /* Region regs valid */
+ ar : 1, /* App regs valid */
+ br : 1, /* Branch regs valid */
+ pr : 1, /* Predicate registers
+ * valid
+ */
+
+ fp : 1, /* fp registers valid*/
+ b1 : 1, /* Preserved bank one
+ * general registers
+ * are valid
+ */
+ b0 : 1, /* Preserved bank zero
+ * general registers
+ * are valid
+ */
+ gr : 1, /* General registers
+ * are valid
+ * (excl. banked regs)
+ */
+ dsize : 16, /* size of dynamic
+ * state returned
+ * by the processor
+ */
+
+ se : 1, /* Shared error. MCA in a
+ shared structure */
+ reserved2 : 10,
+ cc : 1, /* Cache check */
+ tc : 1, /* TLB check */
+ bc : 1, /* Bus check */
+ rc : 1, /* Register file check */
+ uc : 1; /* Uarch check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+ u64 op : 4, /* Type of cache
+ * operation that
+ * caused the machine
+ * check.
+ */
+ level : 2, /* Cache level */
+ reserved1 : 2,
+ dl : 1, /* Failure in data part
+ * of cache line
+ */
+ tl : 1, /* Failure in tag part
+ * of cache line
+ */
+ dc : 1, /* Failure in dcache */
+ ic : 1, /* Failure in icache */
+ mesi : 3, /* Cache line state */
+ mv : 1, /* mesi valid */
+ way : 5, /* Way in which the
+ * error occurred
+ */
+ wiv : 1, /* Way field valid */
+ reserved2 : 1,
+ dp : 1, /* Data poisoned on MBE */
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+
+ index : 20, /* Cache line index */
+ reserved4 : 2,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+ u64 tr_slot : 8, /* Slot# of TR where
+ * error occurred
+ */
+ trv : 1, /* tr_slot field is valid */
+ reserved1 : 1,
+ level : 2, /* TLB level where failure occurred */
+ reserved2 : 4,
+ dtr : 1, /* Fail in data TR */
+ itr : 1, /* Fail in inst TR */
+ dtc : 1, /* Fail in data TC */
+ itc : 1, /* Fail in inst. TC */
+ op : 4, /* Cache operation */
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+ reserved4 : 22,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+ u64 size : 5, /* Xaction size */
+ ib : 1, /* Internal bus error */
+ eb : 1, /* External bus error */
+ cc : 1, /* Error occurred
+ * during cache-cache
+ * transfer.
+ */
+ type : 8, /* Bus xaction type*/
+ sev : 5, /* Bus error severity*/
+ hier : 2, /* Bus hierarchy level */
+ dp : 1, /* Data poisoned on MBE */
+ bsi : 8, /* Bus error status
+ * info
+ */
+ reserved2 : 22,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_bus_check_info_t;
+
+typedef struct pal_reg_file_check_info_s {
+ u64 id : 4, /* Register file identifier */
+ op : 4, /* Type of register
+ * operation that
+ * caused the machine
+ * check.
+ */
+ reg_num : 7, /* Register number */
+ rnv : 1, /* reg_num valid */
+ reserved2 : 38,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ reserved3 : 3,
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_reg_file_check_info_t;
+
+typedef struct pal_uarch_check_info_s {
+ u64 sid : 5, /* Structure identification */
+ level : 3, /* Level of failure */
+ array_id : 4, /* Array identification */
+ op : 4, /* Type of
+ * operation that
+ * caused the machine
+ * check.
+ */
+ way : 6, /* Way of structure */
+ wv : 1, /* way valid */
+ xv : 1, /* index valid */
+ reserved1 : 6,
+ hlth : 2, /* Health indicator */
+ index : 8, /* Index or set of the uarch
+ * structure that failed.
+ */
+ reserved2 : 24,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_uarch_check_info_t;
+
+typedef union pal_mc_error_info_u {
+ u64 pmei_data;
+ pal_processor_state_info_t pme_processor;
+ pal_cache_check_info_t pme_cache;
+ pal_tlb_check_info_t pme_tlb;
+ pal_bus_check_info_t pme_bus;
+ pal_reg_file_check_info_t pme_reg_file;
+ pal_uarch_check_info_t pme_uarch;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check pme_processor.uc
+#define pmci_proc_bus_check pme_processor.bc
+#define pmci_proc_tlb_check pme_processor.tc
+#define pmci_proc_cache_check pme_processor.cc
+#define pmci_proc_dynamic_state_size pme_processor.dsize
+#define pmci_proc_gpr_valid pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid pme_processor.b1
+#define pmci_proc_fp_valid pme_processor.fp
+#define pmci_proc_predicate_regs_valid pme_processor.pr
+#define pmci_proc_branch_regs_valid pme_processor.br
+#define pmci_proc_app_regs_valid pme_processor.ar
+#define pmci_proc_region_regs_valid pme_processor.rr
+#define pmci_proc_translation_regs_valid pme_processor.tr
+#define pmci_proc_debug_regs_valid pme_processor.dr
+#define pmci_proc_perf_counters_valid pme_processor.pc
+#define pmci_proc_control_regs_valid pme_processor.cr
+#define pmci_proc_machine_check_expected pme_processor.ex
+#define pmci_proc_machine_check_corrected pme_processor.cm
+#define pmci_proc_rse_valid pme_processor.rs
+#define pmci_proc_machine_check_or_init pme_processor.in
+#define pmci_proc_dynamic_state_valid pme_processor.dy
+#define pmci_proc_operation pme_processor.op
+#define pmci_proc_trap_lost pme_processor.tl
+#define pmci_proc_hardware_damage pme_processor.hd
+#define pmci_proc_uncontained_storage_damage pme_processor.us
+#define pmci_proc_machine_check_isolated pme_processor.ci
+#define pmci_proc_continuable pme_processor.co
+#define pmci_proc_storage_intergrity_synced pme_processor.sy
+#define pmci_proc_min_state_save_area_regd pme_processor.mn
+#define pmci_proc_distinct_multiple_errors pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete pme_processor.rz
+
+
+#define pmci_cache_level pme_cache.level
+#define pmci_cache_line_state pme_cache.mesi
+#define pmci_cache_line_state_valid pme_cache.mv
+#define pmci_cache_line_index pme_cache.index
+#define pmci_cache_instr_cache_fail pme_cache.ic
+#define pmci_cache_data_cache_fail pme_cache.dc
+#define pmci_cache_line_tag_fail pme_cache.tl
+#define pmci_cache_line_data_fail pme_cache.dl
+#define pmci_cache_operation pme_cache.op
+#define pmci_cache_way_valid pme_cache.wv
+#define pmci_cache_target_address_valid pme_cache.tv
+#define pmci_cache_way pme_cache.way
+#define pmci_cache_mc				pme_cache.mcc
+
+#define pmci_tlb_instr_translation_cache_fail pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot pme_tlb.tr_slot
+#define pmci_tlb_mc				pme_tlb.mcc
+
+#define pmci_bus_status_info pme_bus.bsi
+#define pmci_bus_req_address_valid pme_bus.rq
+#define pmci_bus_resp_address_valid pme_bus.rp
+#define pmci_bus_target_address_valid pme_bus.tv
+#define pmci_bus_error_severity pme_bus.sev
+#define pmci_bus_transaction_type pme_bus.type
+#define pmci_bus_cache_cache_transfer pme_bus.cc
+#define pmci_bus_transaction_size pme_bus.size
+#define pmci_bus_internal_error pme_bus.ib
+#define pmci_bus_external_error pme_bus.eb
+#define pmci_bus_mc				pme_bus.mcc
+
+/*
+ * NOTE: this min_state_save area struct only includes the 1KB
+ * architectural state save area. The other 3 KB is scratch space
+ * for PAL.
+ */
+
+typedef struct pal_min_state_area_s {
+ u64 pmsa_nat_bits; /* nat bits for saved GRs */
+ u64 pmsa_gr[15]; /* GR1 - GR15 */
+ u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */
+ u64 pmsa_bank1_gr[16]; /* GR16 - GR31 */
+ u64 pmsa_pr; /* predicate registers */
+ u64 pmsa_br0; /* branch register 0 */
+ u64 pmsa_rsc; /* ar.rsc */
+ u64 pmsa_iip; /* cr.iip */
+ u64 pmsa_ipsr; /* cr.ipsr */
+ u64 pmsa_ifs; /* cr.ifs */
+ u64 pmsa_xip; /* previous iip */
+ u64 pmsa_xpsr; /* previous psr */
+ u64 pmsa_xfs; /* previous ifs */
+ u64 pmsa_br1; /* branch register 1 */
+ u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates reason of call failure.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ s64 status;
+ u64 v0;
+ u64 v1;
+ u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed. Reserved parameters are not optional
+ * parameters.
+ */
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
+extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
+
+#define PAL_CALL(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_static(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_stacked(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern pal_cache_config_info_t l0d_cache_config_info;
+extern pal_cache_config_info_t l0i_cache_config_info;
+extern pal_cache_config_info_t l1_cache_config_info;
+extern pal_cache_config_info_t l2_cache_config_info;
+
+extern pal_cache_protection_info_t l0d_cache_protection_info;
+extern pal_cache_protection_info_t l0i_cache_protection_info;
+extern pal_cache_protection_info_t l1_cache_protection_info;
+extern pal_cache_protection_info_t l2_cache_protection_info;
+
+extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+
+extern void pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+ u64 pal_bus_features_val;
+ struct {
+ u64 pbf_reserved1 : 29;
+ u64 pbf_req_bus_parking : 1;
+ u64 pbf_bus_lock_mask : 1;
+ u64 pbf_enable_half_xfer_rate : 1;
+ u64 pbf_reserved2 : 20;
+ u64 pbf_enable_shared_line_replace : 1;
+ u64 pbf_enable_exclusive_line_replace : 1;
+ u64 pbf_disable_xaction_queueing : 1;
+ u64 pbf_disable_resp_err_check : 1;
+ u64 pbf_disable_berr_check : 1;
+ u64 pbf_disable_bus_req_internal_err_signal : 1;
+ u64 pbf_disable_bus_req_berr_signal : 1;
+ u64 pbf_disable_bus_init_event_check : 1;
+ u64 pbf_disable_bus_init_event_signal : 1;
+ u64 pbf_disable_bus_addr_err_check : 1;
+ u64 pbf_disable_bus_addr_err_signal : 1;
+ u64 pbf_disable_bus_data_err_check : 1;
+ } pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+static inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+ pal_bus_features_u_t *features_status,
+ pal_bus_features_u_t *features_control)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+ if (features_avail)
+ features_avail->pal_bus_features_val = iprv.v0;
+ if (features_status)
+ features_status->pal_bus_features_val = iprv.v1;
+ if (features_control)
+ features_control->pal_bus_features_val = iprv.v2;
+ return iprv.status;
+}
+
+/* Enables/disables specific processor bus features */
+static inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+ return iprv.status;
+}
+
+/* Get detailed cache information */
+static inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ conf->pcci_status = iprv.status;
+ conf->pcci_info_1.pcci1_data = iprv.v0;
+ conf->pcci_info_2.pcci2_data = iprv.v1;
+ conf->pcci_reserved = iprv.v2;
+ }
+ return iprv.status;
+
+}
+
+/* Get detailed cache protection information */
+static inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ prot->pcpi_status = iprv.status;
+ prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+ prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+ prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+ prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+ prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+ prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+ }
+ return iprv.status;
+}
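+
+/* A hedged usage sketch (not part of the PAL interface itself): once
+ * PAL_CACHE_PROT_INFO succeeds, the pcpi_* accessor macros defined above
+ * apply directly to each filled-in element.  The helper name is purely
+ * illustrative.
+ */
+static inline void
+example_report_cache_protection (u64 cache_level, u64 cache_type)
+{
+	pal_cache_protection_info_t prot;
+	int i;
+
+	if (ia64_pal_cache_prot_info(cache_level, cache_type, &prot) != 0)
+		return;
+	for (i = 0; i < PAL_CACHE_PROT_PART_MAX; i++) {
+		/* prot.pcp_info[i].pcpi_prot_method selects among the
+		 * PAL_CACHE_PROT_METHOD_* encodings defined earlier;
+		 * pcpi_prot_bits gives the protection-bit count for
+		 * that part.
+		 */
+	}
+}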
+
+/*
+ * Flush the processor instruction or data caches. *PROGRESS must be
+ * initialized to zero before calling this for the first time.
+ */
+static inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+ if (vector)
+ *vector = iprv.v0;
+ *progress = iprv.v1;
+ return iprv.status;
+}
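+
+/* A hedged usage sketch: the only hard requirement stated above is that
+ * *progress starts at zero on the first call; whether and when the call
+ * must be repeated is left to the PAL specification.  The wrapper name,
+ * and passing invalidate as 0 to request a plain flush, are assumptions
+ * made for illustration.
+ */
+static inline s64
+example_cache_flush_once (u64 cache_type)
+{
+	u64 progress = 0;	/* must be zero before the first call */
+	u64 vector;
+
+	return ia64_pal_cache_flush(cache_type, 0 /* invalidate: assumed off */,
+				    &progress, &vector);
+}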
+
+
+/* Initialize the processor controlled caches */
+static inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
+ return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+static inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+ return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+static inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
+ physical_addr, 0);
+ return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+static inline long ia64_pal_cache_summary(unsigned long *cache_levels,
+ unsigned long *unique_caches)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+ if (cache_levels)
+ *cache_levels = iprv.v0;
+ if (unique_caches)
+ *unique_caches = iprv.v1;
+ return iprv.status;
+}
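+
+/* A hedged sketch of how the two calls above compose: PAL_CACHE_SUMMARY
+ * reports the number of cache levels, after which PAL_CACHE_INFO can be
+ * queried per level.  The cache_type value 2 (data/unified) and the
+ * helper name are assumptions made for illustration.
+ */
+static inline void
+example_walk_cache_levels (void)
+{
+	unsigned long levels, unique;
+	pal_cache_config_info_t cci;
+	u64 l;
+
+	if (ia64_pal_cache_summary(&levels, &unique) != 0)
+		return;
+	for (l = 0; l < levels; l++) {
+		if (ia64_pal_cache_config_info(l, /* cache_type */ 2, &cci) != 0)
+			continue;
+		/* accessors such as cci.pcci_cache_size (defined above)
+		 * are now valid for this level */
+	}
+}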
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+static inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
+ physical_addr, data);
+ return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+ u64 *buffer_size, u64 *buffer_align)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+ if (buffer_size)
+ *buffer_size = iprv.v0;
+ if (buffer_align)
+ *buffer_align = iprv.v1;
+ return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+ if (pal_proc_offset)
+ *pal_proc_offset = iprv.v0;
+ return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+static inline long ia64_pal_debug_info(unsigned long *inst_regs,
+ unsigned long *data_regs)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+ if (inst_regs)
+ *inst_regs = iprv.v0;
+ if (data_regs)
+ *data_regs = iprv.v1;
+
+ return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+static inline s64
+ia64_pal_enter_ia32_env (u64 ia32_env1, u64 ia32_env2, u64 ia32_env3)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+ return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+static inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+ if (global_unique_addr)
+ *global_unique_addr = iprv.v0;
+ return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+ if (platform_base_freq)
+ *platform_base_freq = iprv.v0;
+ return iprv.status;
+}
+
+/*
+ * Get the ratios for processor frequency, bus frequency and interval timer to
+ * the base frequency of the platform
+ */
+static inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+ struct pal_freq_ratio *itc_ratio)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+ if (proc_ratio)
+ *(u64 *)proc_ratio = iprv.v0;
+ if (bus_ratio)
+ *(u64 *)bus_ratio = iprv.v1;
+ if (itc_ratio)
+ *(u64 *)itc_ratio = iprv.v2;
+ return iprv.status;
+}
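+
+/* A hedged sketch combining PAL_FREQ_BASE with the processor ratio to
+ * estimate the CPU frequency.  The num/den bit-field names of struct
+ * pal_freq_ratio are assumed from its definition earlier in this header;
+ * the helper itself is illustrative.
+ */
+static inline s64
+example_cpu_freq_hz (unsigned long *freq)
+{
+	struct pal_freq_ratio proc_ratio, bus_ratio, itc_ratio;
+	unsigned long base;
+	s64 status;
+
+	status = ia64_pal_freq_base(&base);
+	if (status != 0)
+		return status;
+	status = ia64_pal_freq_ratios(&proc_ratio, &bus_ratio, &itc_ratio);
+	if (status == 0 && proc_ratio.den != 0)
+		*freq = base * proc_ratio.num / proc_ratio.den;
+	return status;
+}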
+
+/*
+ * Get the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
+ u64 *la)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
+ if (cur_policy)
+ *cur_policy = iprv.v0;
+ if (num_impacted)
+ *num_impacted = iprv.v1;
+ if (la)
+ *la = iprv.v2;
+ return iprv.status;
+}
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+static inline s64
+ia64_pal_halt (u64 halt_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+ return iprv.status;
+}
+
+typedef union pal_power_mgmt_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 exit_latency : 16,
+ entry_latency : 16,
+ power_consumption : 28,
+ im : 1,
+ co : 1,
+ reserved : 2;
+ } pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about processor's optional power management capabilities. */
+static inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+ return iprv.status;
+}
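+
+/* A hedged sketch: PAL_HALT_INFO fills one descriptor per low-power
+ * state into the caller-supplied buffer.  The 8-entry buffer size and
+ * the reading of the 'im' bit as an "implemented" flag are assumptions
+ * made for illustration.
+ */
+static inline void
+example_scan_halt_states (void)
+{
+	pal_power_mgmt_info_u_t halt_info[8];
+	int i;
+
+	if (ia64_pal_halt_info(halt_info) != 0)
+		return;
+	for (i = 0; i < 8; i++) {
+		if (!halt_info[i].pal_power_mgmt_info_s.im)
+			continue;	/* state not implemented */
+		/* entry/exit latencies and power consumption are valid here */
+	}
+}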
+
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
+ *pstate_index = iprv.v0;
+ return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+ return iprv.status;
+}
+
+/* Processor branding information */
+static inline s64
+ia64_pal_get_brand_info (char *brand_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
+ return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+static inline s64
+ia64_pal_halt_light (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+static inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+ if (pending_vector)
+ *pending_vector = iprv.v0;
+ return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+static inline s64
+ia64_pal_mc_drain (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+static inline s64
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
+ if (size)
+ *size = iprv.v0;
+ return iprv.status;
+}
+
+/* Return processor machine check information */
+static inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+ if (size)
+ *size = iprv.v0;
+ if (error_info)
+ *error_info = iprv.v1;
+ return iprv.status;
+}
+
+/* Injects the requested processor error or returns info on
+ * supported injection capabilities for the current processor implementation.
+ */
+static inline s64
+ia64_pal_mc_error_inject_phys (u64 err_type_info, u64 err_struct_info,
+ u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+ err_struct_info, err_data_buffer);
+	if (capabilities)
+		*capabilities = iprv.v0;
+	if (resources)
+		*resources = iprv.v1;
+ return iprv.status;
+}
+
+static inline s64
+ia64_pal_mc_error_inject_virt (u64 err_type_info, u64 err_struct_info,
+ u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+ err_struct_info, err_data_buffer);
+	if (capabilities)
+		*capabilities = iprv.v0;
+	if (resources)
+		*resources = iprv.v1;
+ return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+static inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+ if (previous)
+ *previous = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_hw_tracking_u {
+ u64 pht_data;
+ struct {
+ u64 itc :4, /* Instruction cache tracking */
+			dct :4,		/* Data cache tracking */
+ itt :4, /* Instruction TLB tracking */
+ ddt :4, /* Data TLB tracking */
+ reserved:48;
+ } pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+ if (status)
+ *status = iprv.v0;
+ return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+static inline s64
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+ if (req_size)
+ *req_size = iprv.v0;
+ return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+static inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+ return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+static inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+ if (mem_attrib)
+ *mem_attrib = iprv.v0 & 0xff;
+ return iprv.status;
+}
+
+/* Return the amount of memory needed for second phase of processor
+ * self-test and the required alignment of memory.
+ */
+static inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+ if (bytes_needed)
+ *bytes_needed = iprv.v0;
+ if (alignment)
+ *alignment = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 generic : 8,
+ width : 8,
+ cycles : 8,
+ retired : 8,
+ reserved : 32;
+ } pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+static inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+ if (pm_info)
+ pm_info->ppmi_data = iprv.v0;
+ return iprv.status;
+}
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+static inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+ return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+static inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+ return iprv.status;
+}
+
+struct pal_features_s;
+/* Provide information about configurable processor features */
+static inline s64
+ia64_pal_proc_get_features (u64 *features_avail,
+ u64 *features_status,
+ u64 *features_control,
+ u64 features_set)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, features_set, 0);
+ if (iprv.status == 0) {
+ *features_avail = iprv.v0;
+ *features_status = iprv.v1;
+ *features_control = iprv.v2;
+ }
+ return iprv.status;
+}
+
+/* Enable/disable processor dependent features */
+static inline s64
+ia64_pal_proc_set_features (u64 feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+ return iprv.status;
+}
+
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+ unsigned long base;
+ u32 count[2];
+ u32 stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+static inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+ struct ia64_pal_retval iprv;
+
+ if (!ptce)
+ return -1;
+
+ PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+ if (iprv.status == 0) {
+ ptce->base = iprv.v0;
+ ptce->count[0] = iprv.v1 >> 32;
+ ptce->count[1] = iprv.v1 & 0xffffffff;
+ ptce->stride[0] = iprv.v2 >> 32;
+ ptce->stride[1] = iprv.v2 & 0xffffffff;
+ }
+ return iprv.status;
+}
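+
+/* A hedged sketch of the architected purge loop referred to above: walk
+ * from the returned base with the two counts/strides, issuing ptc.e at
+ * each step.  ia64_ptce() is assumed to be the ptc.e intrinsic provided
+ * elsewhere in these headers; the wrapper name is illustrative.
+ */
+static inline void
+example_purge_whole_tc (void)
+{
+	ia64_ptce_info_t ptce;
+	unsigned long addr;
+	u32 i, j;
+
+	if (ia64_get_ptce(&ptce) != 0)
+		return;
+	addr = ptce.base;
+	for (i = 0; i < ptce.count[0]; i++) {
+		for (j = 0; j < ptce.count[1]; j++) {
+			ia64_ptce(addr);
+			addr += ptce.stride[1];
+		}
+		addr += ptce.stride[0];
+	}
+}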
+
+/* Return info about implemented application and control registers. */
+static inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+ if (reg_info_1)
+ *reg_info_1 = iprv.v0;
+ if (reg_info_2)
+ *reg_info_2 = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_hints_u {
+ unsigned long ph_data;
+ struct {
+ unsigned long si : 1,
+ li : 1,
+ reserved : 62;
+ } pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+static inline long ia64_pal_rse_info(unsigned long *num_phys_stacked,
+ pal_hints_u_t *hints)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+ if (num_phys_stacked)
+ *num_phys_stacked = iprv.v0;
+ if (hints)
+ hints->ph_data = iprv.v1;
+ return iprv.status;
+}
+
+/*
+ * Set the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_set_hw_policy (u64 policy)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
+ return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ * This is usually called in IA-32 mode.
+ */
+static inline s64
+ia64_pal_shutdown (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+static inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+ if (self_test_state)
+ *self_test_state = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_version_u {
+ u64 pal_version_val;
+ struct {
+ u64 pv_pal_b_rev : 8;
+ u64 pv_pal_b_model : 8;
+ u64 pv_reserved1 : 8;
+ u64 pv_pal_vendor : 8;
+ u64 pv_pal_a_rev : 8;
+ u64 pv_pal_a_model : 8;
+ u64 pv_reserved2 : 16;
+ } pal_version_s;
+} pal_version_u_t;
+
+
+/*
+ * Return PAL version information. While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls. We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
+static inline s64
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+ if (pal_min_version)
+ pal_min_version->pal_version_val = iprv.v0;
+
+ if (pal_cur_version)
+ pal_cur_version->pal_version_val = iprv.v1;
+
+ return iprv.status;
+}
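+
+/* A hedged usage sketch: decoding the vendor and PAL_B model/revision
+ * from the current version word returned above.  The helper name is
+ * illustrative.
+ */
+static inline void
+example_report_pal_version (void)
+{
+	pal_version_u_t min_ver, cur_ver;
+
+	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+		return;
+	/* cur_ver.pal_version_s.pv_pal_vendor, .pv_pal_b_model and
+	 * .pv_pal_b_rev identify the firmware image currently in use */
+}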
+
+typedef union pal_tc_info_u {
+ u64 pti_val;
+ struct {
+ u64 num_sets : 8,
+ associativity : 8,
+ num_entries : 16,
+ pf : 1,
+ unified : 1,
+ reduce_tr : 1,
+ reserved : 29;
+ } pal_tc_info_s;
+} pal_tc_info_u_t;
+
+#define tc_reduce_tr pal_tc_info_s.reduce_tr
+#define tc_unified pal_tc_info_s.unified
+#define tc_pf pal_tc_info_s.pf
+#define tc_num_entries pal_tc_info_s.num_entries
+#define tc_associativity pal_tc_info_s.associativity
+#define tc_num_sets pal_tc_info_s.num_sets
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+ if (tc_info)
+ tc_info->pti_val = iprv.v0;
+ if (tc_pages)
+ *tc_pages = iprv.v1;
+ return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64 ia64_pal_vm_page_size(u64 *tr_pages, u64 *vw_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+ if (tr_pages)
+ *tr_pages = iprv.v0;
+ if (vw_pages)
+ *vw_pages = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+ u64 pvi1_val;
+ struct {
+ u64 vw : 1,
+ phys_add_size : 7,
+ key_size : 8,
+ max_pkr : 8,
+ hash_tag_id : 8,
+ max_dtr_entry : 8,
+ max_itr_entry : 8,
+ max_unique_tcs : 8,
+ num_tc_levels : 8;
+ } pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+#define PAL_MAX_PURGES	0xFFFF		/* all ones means unlimited */
+
+typedef union pal_vm_info_2_u {
+ u64 pvi2_val;
+ struct {
+ u64 impl_va_msb : 8,
+ rid_size : 8,
+ max_purges : 16,
+ reserved : 32;
+ } pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+ if (vm_info_1)
+ vm_info_1->pvi1_val = iprv.v0;
+ if (vm_info_2)
+ vm_info_2->pvi2_val = iprv.v1;
+ return iprv.status;
+}
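+
+/* A hedged sketch: the impl_va_msb and rid_size fields returned by
+ * PAL_VM_SUMMARY give the implemented virtual-address width and the
+ * region-ID width.  The "+ 1" converts a most-significant-bit index
+ * into a bit count; the helper itself is illustrative.
+ */
+static inline void
+example_vm_widths (unsigned long *va_bits, unsigned long *rid_bits)
+{
+	pal_vm_info_1_u_t vm_info_1;
+	pal_vm_info_2_u_t vm_info_2;
+
+	if (ia64_pal_vm_summary(&vm_info_1, &vm_info_2) != 0)
+		return;
+	*va_bits  = vm_info_2.pal_vm_info_2_s.impl_va_msb + 1;
+	*rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;
+}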
+
+typedef union pal_vp_info_u {
+ u64 pvi_val;
+ struct {
+ u64 index: 48, /* virtual feature set info */
+ vmm_id: 16; /* feature set id */
+ } pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+ if (vp_info)
+ *vp_info = iprv.v0;
+ if (vmm_id)
+ *vmm_id = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+ u64 piv_val;
+ struct {
+ u64 access_rights_valid : 1,
+ priv_level_valid : 1,
+ dirty_bit_valid : 1,
+ mem_attr_valid : 1,
+ reserved : 60;
+ } pal_tr_valid_s;
+} pal_tr_valid_u_t;
+
+/* Read a translation register */
+static inline s64
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
+ struct ia64_pal_retval iprv;
+	PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type, (u64)ia64_tpa(tr_buffer));
+ if (tr_valid)
+ tr_valid->piv_val = iprv.v0;
+ return iprv.status;
+}
+
+/*
+ * PAL_PREFETCH_VISIBILITY transaction types
+ */
+#define PAL_VISIBILITY_VIRTUAL 0
+#define PAL_VISIBILITY_PHYSICAL 1
+
+/*
+ * PAL_PREFETCH_VISIBILITY return codes
+ */
+#define PAL_VISIBILITY_OK 1
+#define PAL_VISIBILITY_OK_REMOTE_NEEDED 0
+#define PAL_VISIBILITY_INVAL_ARG -2
+#define PAL_VISIBILITY_ERROR -3
+
+static inline s64
+ia64_pal_prefetch_visibility (s64 trans_type)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
+ return iprv.status;
+}
+
+/* data structure for getting information on logical to physical mappings */
+typedef union pal_log_overview_u {
+ struct {
+ u64 num_log :16, /* Total number of logical
+ * processors on this die
+ */
+ tpc :8, /* Threads per core */
+ reserved3 :8, /* Reserved */
+ cpp :8, /* Cores per processor */
+ reserved2 :8, /* Reserved */
+ ppid :8, /* Physical processor ID */
+ reserved1 :8; /* Reserved */
+ } overview_bits;
+ u64 overview_data;
+} pal_log_overview_t;
+
+typedef union pal_proc_n_log_info1_u{
+ struct {
+ u64 tid :16, /* Thread id */
+ reserved2 :16, /* Reserved */
+ cid :16, /* Core id */
+ reserved1 :16; /* Reserved */
+ } ppli1_bits;
+ u64 ppli1_data;
+} pal_proc_n_log_info1_t;
+
+typedef union pal_proc_n_log_info2_u {
+ struct {
+ u64 la :16, /* Logical address */
+ reserved :48; /* Reserved */
+ } ppli2_bits;
+ u64 ppli2_data;
+} pal_proc_n_log_info2_t;
+
+typedef struct pal_logical_to_physical_s
+{
+ pal_log_overview_t overview;
+ pal_proc_n_log_info1_t ppli1;
+ pal_proc_n_log_info2_t ppli2;
+} pal_logical_to_physical_t;
+
+#define overview_num_log overview.overview_bits.num_log
+#define overview_tpc overview.overview_bits.tpc
+#define overview_cpp overview.overview_bits.cpp
+#define overview_ppid overview.overview_bits.ppid
+#define log1_tid ppli1.ppli1_bits.tid
+#define log1_cid ppli1.ppli1_bits.cid
+#define log2_la ppli2.ppli2_bits.la
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
+
+ if (iprv.status == PAL_STATUS_SUCCESS)
+ {
+ mapping->overview.overview_data = iprv.v0;
+ mapping->ppli1.ppli1_data = iprv.v1;
+ mapping->ppli2.ppli2_data = iprv.v2;
+ }
+
+ return iprv.status;
+}
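+
+/* A hedged usage sketch showing how the accessor macros defined above
+ * attach to a pal_logical_to_physical_t variable once the call
+ * succeeds.  The helper name is illustrative.
+ */
+static inline void
+example_report_topology (u64 proc_number)
+{
+	pal_logical_to_physical_t map;
+
+	if (ia64_pal_logical_to_phys(proc_number, &map) != PAL_STATUS_SUCCESS)
+		return;
+	/* map.overview_tpc (threads per core), map.overview_cpp (cores per
+	 * processor), map.log1_tid and map.log1_cid now describe where
+	 * this logical processor sits physically */
+}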
+
+typedef struct pal_cache_shared_info_s
+{
+ u64 num_shared;
+ pal_proc_n_log_info1_t ppli1;
+ pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information about processors sharing the specified cache level and type. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+ u64 type,
+ u64 proc_number,
+ pal_cache_shared_info_t *info)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+ if (iprv.status == PAL_STATUS_SUCCESS) {
+ info->num_shared = iprv.v0;
+ info->ppli1.ppli1_data = iprv.v1;
+ info->ppli2.ppli2_data = iprv.v2;
+ }
+
+ return iprv.status;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
diff --git a/arch/ia64/include/asm/param.h b/arch/ia64/include/asm/param.h
new file mode 100644
index 000000000..f0b786227
--- /dev/null
+++ b/arch/ia64/include/asm/param.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_PARAM_H
+#define _ASM_IA64_PARAM_H
+
+#include <uapi/asm/param.h>
+
+# define HZ CONFIG_HZ
+# define USER_HZ HZ
+# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+#endif /* _ASM_IA64_PARAM_H */
diff --git a/arch/ia64/include/asm/parport.h b/arch/ia64/include/asm/parport.h
new file mode 100644
index 000000000..360ca9bf2
--- /dev/null
+++ b/arch/ia64/include/asm/parport.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_IA64_PARPORT_H
+#define _ASM_IA64_PARPORT_H 1
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports(autoirq, autodma);
+}
+
+#endif /* _ASM_IA64_PARPORT_H */
diff --git a/arch/ia64/include/asm/patch.h b/arch/ia64/include/asm/patch.h
new file mode 100644
index 000000000..bd487ed22
--- /dev/null
+++ b/arch/ia64/include/asm/patch.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PATCH_H
+#define _ASM_IA64_PATCH_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * There are a number of reasons for patching instructions. Rather than duplicating code
+ * all over the place, we put the common stuff here. Reasons for patching: in-kernel
+ * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
+ * shared library. Undoubtedly, some of these reasons will disappear and others will
+ * be added over time.
+ */
+#include <linux/elf.h>
+#include <linux/types.h>
+
+extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
+extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
+extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
+
+extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
+extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_phys_stack_reg(unsigned long val);
+extern void ia64_patch_rse (unsigned long start, unsigned long end);
+extern void ia64_patch_gate (void);
+
+#endif /* _ASM_IA64_PATCH_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
new file mode 100644
index 000000000..8c163d1d0
--- /dev/null
+++ b/arch/ia64/include/asm/pci.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+
+struct pci_vector_struct {
+ __u16 segment; /* PCI Segment number */
+ __u16 bus; /* PCI Bus number */
+ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+ __u32 irq; /* IRQ assigned */
+};
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
+ * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
+ * loader.
+ */
+#define pcibios_assign_all_busses() 0
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+#define HAVE_PCI_MMAP
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+#define arch_can_pci_mmap_wc() 1
+
+#define HAVE_PCI_LEGACY
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state);
+
+char *pci_get_legacy_mem(struct pci_bus *bus);
+int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
+int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
+
+struct pci_controller {
+ struct acpi_device *companion;
+ void *iommu;
+ int segment;
+ int node; /* nearest node with memory or NUMA_NO_NODE for global allocation */
+
+ void *platform_data;
+};
+
+
+#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
+#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
+
+extern struct pci_ops pci_root_ops;
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return (pci_domain_nr(bus) != 0);
+}
+
+#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
+}
+
+#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
new file mode 100644
index 000000000..f357b9bb3
--- /dev/null
+++ b/arch/ia64/include/asm/percpu.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PERCPU_H
+#define _ASM_IA64_PERCPU_H
+
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifdef __ASSEMBLY__
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
+#else /* !__ASSEMBLY__ */
+
+
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__)))
+#endif
+
+#define __my_cpu_offset __ia64_per_cpu_var(local_per_cpu_offset)
+
+extern void *per_cpu_init(void);
+
+#else /* ! SMP */
+
+#define per_cpu_init() (__phys_per_cpu_start)
+
+#endif /* SMP */
+
+#define PER_CPU_BASE_SECTION ".data..percpu"
+
+/*
+ * Be extremely careful when taking the address of this variable! Due to virtual
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var) (*({ \
+ __verify_pcpu_ptr(&(var)); \
+ ((typeof(var) __kernel __force *)&(var)); \
+}))
+
+#include <asm-generic/percpu.h>
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
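+
+/* A hedged usage sketch contrasting the two access forms discussed
+ * above: __ia64_per_cpu_var() yields the virtually remapped per-CPU
+ * view directly, whereas this_cpu_ptr(&var) returns the canonical
+ * address.  The helper is illustrative only.
+ */
+static inline unsigned long example_read_local_per_cpu_offset(void)
+{
+	return __ia64_per_cpu_var(local_per_cpu_offset);
+}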
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PERCPU_H */
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
new file mode 100644
index 000000000..e0545869c
--- /dev/null
+++ b/arch/ia64/include/asm/perfmon.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_PERFMON_H
+#define _ASM_IA64_PERFMON_H
+
+#include <uapi/asm/perfmon.h>
+
+
+extern long perfmonctl(int fd, int cmd, void *arg, int narg);
+
+typedef struct {
+ void (*handler)(int irq, void *arg, struct pt_regs *regs);
+} pfm_intr_handler_desc_t;
+
+extern void pfm_save_regs (struct task_struct *);
+extern void pfm_load_regs (struct task_struct *);
+
+extern void pfm_exit_thread(struct task_struct *);
+extern int pfm_use_debug_registers(struct task_struct *);
+extern int pfm_release_debug_registers(struct task_struct *);
+extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
+extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
+extern void pfm_init_percpu(void);
+extern void pfm_handle_work(void);
+extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+
+
+
+/*
+ * Reset PMD register flags
+ */
+#define PFM_PMD_SHORT_RESET 0
+#define PFM_PMD_LONG_RESET 1
+
+typedef union {
+ unsigned int val;
+ struct {
+ unsigned int notify_user:1; /* notify user program of overflow */
+ unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
+ unsigned int block_task:1; /* block monitored task on kernel exit */
+ unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
+ unsigned int reserved:28; /* for future use */
+ } bits;
+} pfm_ovfl_ctrl_t;
+
+typedef struct {
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+ unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
+ unsigned short active_set; /* event set active at the time of the overflow */
+ pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
+
+	unsigned long pmd_last_reset;	/* last reset value of the PMD */
+ unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
+ unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
+ unsigned long pmd_value; /* current 64-bit value of the PMD */
+ unsigned long pmd_eventid; /* eventid associated with PMD */
+} pfm_ovfl_arg_t;
+
+
+typedef struct {
+ char *fmt_name;
+ pfm_uuid_t fmt_uuid;
+ size_t fmt_arg_size;
+ unsigned long fmt_flags;
+
+ int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
+ int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
+ int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
+ int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
+ int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
+
+ struct list_head fmt_list;
+} pfm_buffer_fmt_t;
+
+extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
+extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
+
+/*
+ * perfmon interface exported to modules
+ */
+extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+
+/*
+ * describe the content of the local_cpu_data->pfm_syst_info field
+ */
+#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
+#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
+#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
+
+/*
+ * sysctl control structure. visible to sampling formats
+ */
+typedef struct {
+ int debug; /* turn on/off debugging via syslog */
+ int debug_ovfl; /* turn on/off debug printk in overflow handler */
+ int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
+ int expert_mode; /* turn on/off value checking */
+} pfm_sysctl_t;
+extern pfm_sysctl_t pfm_sysctl;
+
+
+#endif /* _ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
new file mode 100644
index 000000000..9601cfe83
--- /dev/null
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
+ */
+
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+
+#include <asm-generic/pgalloc.h>
+
+#include <asm/mmu_context.h>
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+}
+
+#if CONFIG_PGTABLE_LEVELS == 4
+static inline void
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
+{
+ p4d_val(*p4d_entry) = __pa(pud);
+}
+
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+
+static inline void
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+ pud_val(*pud_entry) = __pa(pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
+{
+ pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
+{
+ pmd_val(*pmd_entry) = __pa(pte);
+}
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+
+#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
new file mode 100644
index 000000000..6e5c38756
--- /dev/null
+++ b/arch/ia64/include/asm/pgtable.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PGTABLE_H
+#define _ASM_IA64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the IA-64 page table tree.
+ *
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
+ * in <asm/page.h>.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/types.h>
+
+#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
+
+/*
+ * First, define the various bits in a PTE. Note that the PTE format
+ * matches the VHPT short format, the first doubleword of the VHPD long
+ * format, and the first doubleword of the TLB insertion format.
+ */
+#define _PAGE_P_BIT 0
+#define _PAGE_A_BIT 5
+#define _PAGE_D_BIT 6
+
+#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
+#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
+#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
+#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
+#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
+#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
+#define _PAGE_MA_MASK (0x7 << 2)
+#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
+#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
+#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
+#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
+#define _PAGE_PL_MASK (3 << 7)
+#define _PAGE_AR_R (0 << 9) /* read only */
+#define _PAGE_AR_RX (1 << 9) /* read & execute */
+#define _PAGE_AR_RW (2 << 9) /* read & write */
+#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
+#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
+#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
+#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
+#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
+#define _PAGE_AR_MASK (7 << 9)
+#define _PAGE_AR_SHIFT 9
+#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
+#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
+#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
+#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
+#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
+
+#define _PFN_MASK _PAGE_PPN_MASK
+/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
+#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
+
+#define _PAGE_SIZE_4K 12
+#define _PAGE_SIZE_8K 13
+#define _PAGE_SIZE_16K 14
+#define _PAGE_SIZE_64K 16
+#define _PAGE_SIZE_256K 18
+#define _PAGE_SIZE_1M 20
+#define _PAGE_SIZE_4M 22
+#define _PAGE_SIZE_16M 24
+#define _PAGE_SIZE_64M 26
+#define _PAGE_SIZE_256M 28
+#define _PAGE_SIZE_1G 30
+#define _PAGE_SIZE_4G 32
+
+#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
+#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
+#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
+
+/*
+ * How many pointers a page table level holds, expressed as a shift
+ */
+#define PTRS_PER_PTD_SHIFT (PAGE_SHIFT-3)
+
+/*
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
+ *
+ * PMD_SHIFT determines the size of the area a third-level page table
+ * can map.
+ */
+#define PMD_SHIFT (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
+
+#if CONFIG_PGTABLE_LEVELS == 4
+/*
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#if CONFIG_PGTABLE_LEVELS == 4
+#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS 0UL
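+
+/*
+ * Worked example (assuming an 8KB page size, PAGE_SHIFT == 13, and
+ * 3-level tables): PTRS_PER_PTD_SHIFT is 10, so every level holds 1024
+ * entries; PMD_SIZE is 8MB, PGDIR_SIZE is 8GB, and USER_PTRS_PER_PGD is
+ * 640 (the 5/8 of the pgd that backs user regions 0-4).
+ */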
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time
+ * they are used, the page is accessed. They are cleared only by the
+ * page-out routines.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
+#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX | \
+ _PAGE_MA_UC)
+
+# ifndef __ASSEMBLY__
+
+#include <linux/sched/mm.h> /* for mm_struct */
+#include <linux/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Next come the mappings that determine how mmap() protection bits
+ * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The
+ * _P version gets used for a private shared memory segment, the _S
+ * version gets used for a shared memory segment with MAP_SHARED on.
+ * In a private shared memory segment, we do a copy-on-write if a task
+ * attempts to write to the page.
+ */
+ /* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
+#define __P011 PAGE_READONLY /* ditto */
+#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
+#define __S011 PAGE_SHARED
+#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+
+#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#if CONFIG_PGTABLE_LEVELS == 4
+#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page addresses:
+ */
+
+
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static inline long
+ia64_phys_addr_valid (unsigned long addr)
+{
+ return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error. Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions).
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr) (1)
+
+
+/*
+ * Now come the defines and routines to manage and access the three-level
+ * page table.
+ */
+
+
+#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+extern unsigned long VMALLOC_END;
+#else
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
+# define vmemmap ((struct page *)VMALLOC_END)
+#else
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
+#endif
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
+
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
+
+/*
+ * Conversion functions: convert page frame number (pfn) and a protection value to a page
+ * table entry (pte).
+ */
+#define pfn_pte(pfn, pgprot) \
+({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
+
+/* Extract pfn from pte. */
+#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
+/* pte_page() returns the "struct page *" corresponding to the PTE: */
+#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
+#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
+#define pud_present(pud) (pud_val(pud) != 0UL)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+#define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & _PFN_MASK))
+#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
+
+#if CONFIG_PGTABLE_LEVELS == 4
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d) (p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
+#define p4d_pgtable(p4d) ((pud_t *) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d) virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
+#endif
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
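+
+/*
+ * pte_write() above relies on unsigned arithmetic: after subtracting 2
+ * from the access-rights field, only the values 2..6 (RW, RWX, R/RW,
+ * RX/RWX, RWX/RW) compare as <= 4, so exactly those access rights are
+ * reported as writable.
+ */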
+
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
+ */
+#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
+#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
+#define pte_mkhuge(pte) (__pte(pte_val(pte)))
+
+/*
+ * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
+ * sync the icache and dcache when we insert a *new* executable page.
+ * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
+ * icache if necessary.
+ *
+ * set_pte() is also called by the kernel, but we can expect that the kernel
+ * flushes icache explicitly if necessary.
+ */
+#define pte_present_exec_user(pte)\
+ ((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
+ (_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
+
+extern void __ia64_sync_icache_dcache(pte_t pteval);
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ /* page is present && page is user && page is executable
+ * && (page swapin or new page or page migration
+ * || copy_on_write with page copying).
+ */
+ if (pte_present_exec_user(pteval) &&
+ (!pte_present(*ptep) ||
+ pte_pfn(*ptep) != pte_pfn(pteval)))
+ /* load_module() calls flush_icache_range() explicitly. */
+ __ia64_sync_icache_dcache(pteval);
+ *ptep = pteval;
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * Make page protection values cacheable, uncacheable, or write-
+ * combining. Note that "protection" is really a misnomer here as the
+ * protection value contains the memory attribute bits, dirty bits, and
+ * various other bits as well.
+ */
+#define pgprot_cacheable(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+static inline unsigned long
+pgd_index (unsigned long address)
+{
+ unsigned long region = address >> 61;
+ unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+ return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+#define pgd_index pgd_index
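+
+/*
+ * For example, with 8KB pages (PTRS_PER_PGD == 1024) the pgd is carved
+ * into eight 128-entry slices, one per region: the region number ends
+ * up in bits 7-9 of the index while l1index fills bits 0-6.
+ */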
+
+/*
+ * In the kernel's mapped region we know everything is in region number 5, so
+ * as an optimisation its PGD already points to the area for that region.
+ * However, this also means that we cannot use pgd_index() and we must
+ * never add the region here.
+ */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Look up a pgd entry in the gate area. On IA-64, the gate-area
+ resides in the kernel-mapped segment, hence we use pgd_offset_k()
+ here. */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
+/* atomic versions of the some PTE manipulations: */
+
+static inline int
+ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ return 1;
+#endif
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ return __pte(xchg((long *) ptep, 0));
+#else
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ unsigned long new, old;
+
+ do {
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+ return pte_val(a) == pte_val(b);
+}
+
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
+
+/*
+ * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
+ * bits in the swap-type field of the swap pte. It would be nice to
+ * enforce that, but we can't easily include <linux/swap.h> here.
+ * (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
+ *
+ * Format of swap pte:
+ * bit 0 : present bit (must be zero)
+ * bits 1- 7: swap-type
+ * bits 8-62: swap offset
+ * bit 63 : _PAGE_PROTNONE bit
+ */
+#define __swp_type(entry) (((entry).val >> 1) & 0x7f)
+#define __swp_offset(entry) (((entry).val << 1) >> 9)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
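+
+/*
+ * Worked example: __swp_entry(5, 0x1234) == 0x12340a -- the present bit
+ * (bit 0) stays clear, type 5 occupies bits 1-7 and offset 0x1234 starts
+ * at bit 8; the "<< 1 >> 9" in __swp_offset() recovers the offset while
+ * discarding the _PAGE_PROTNONE bit (bit 63).
+ */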
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
+#endif
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE. That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on even when the value at PTEP did not. The
+ * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races. On SMP machines, there are only two
+ * cases where this is true:
+ *
+ * (1) *PTEP has the PRESENT bit turned OFF
+ * (2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE. Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed && __safely_writable) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
+ __changed; \
+})
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
+ __changed; \
+})
+#endif
+
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+ /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+void memmap_init(void);
+void arch_memmap_init(unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn);
+# endif /* CONFIG_VIRTUAL_MEM_MAP */
+# endif /* !__ASSEMBLY__ */
+
+/*
+ * Identity-mapped regions use a large page size. We'll call such large pages
+ * "granules". If you can think of a better name that's unambiguous, let me
+ * know...
+ */
+#if defined(CONFIG_IA64_GRANULE_64MB)
+# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
+#elif defined(CONFIG_IA64_GRANULE_16MB)
+# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
+#endif
+#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
+/*
+ * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
+ */
+#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
+#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
+
+/* These tell get_user_pages() that the first gate page is accessible from user-level. */
+#define FIXADDR_USER_START GATE_ADDR
+#ifdef HAVE_BUGGY_SEGREL
+# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
+#else
+# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+#endif
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
+
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+#include <asm-generic/pgtable-nop4d.h>
+
+#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
new file mode 100644
index 000000000..05e7c9ad1
--- /dev/null
+++ b/arch/ia64/include/asm/processor.h
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ *
+ * 11/24/98 S.Eranian added ia64_set_iva()
+ * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
+ * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
+ */
+
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+#define IA64_NUM_PHYS_STACK_REG 96
+#define IA64_NUM_DBG_REGS 8
+
+#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE is really mis-named: it is actually the maximum user
+ * space address (plus one). On IA-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 8TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE DEFAULT_TASK_SIZE
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
+#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
+#define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration
+ sync at ctx sw */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT 3
+#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT 6
+#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
+ */
+#define IA64_NSEC_PER_CYC_SHIFT 30
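+
+/*
+ * For example, 10*1000000000 << 30 is roughly 1.07e19, comfortably below
+ * 2^64 (~1.84e19), so ten seconds worth of scaled nanoseconds indeed
+ * fits in an unsigned 64-bit value.
+ */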
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+#include <linux/atomic.h>
+#ifdef CONFIG_NUMA
+#include <asm/nodedata.h>
+#endif
+
+/* like above but expressed as bitfields for more efficient access: */
+struct ia64_psr {
+ __u64 reserved0 : 1;
+ __u64 be : 1;
+ __u64 up : 1;
+ __u64 ac : 1;
+ __u64 mfl : 1;
+ __u64 mfh : 1;
+ __u64 reserved1 : 7;
+ __u64 ic : 1;
+ __u64 i : 1;
+ __u64 pk : 1;
+ __u64 reserved2 : 1;
+ __u64 dt : 1;
+ __u64 dfl : 1;
+ __u64 dfh : 1;
+ __u64 sp : 1;
+ __u64 pp : 1;
+ __u64 di : 1;
+ __u64 si : 1;
+ __u64 db : 1;
+ __u64 lp : 1;
+ __u64 tb : 1;
+ __u64 rt : 1;
+ __u64 reserved3 : 4;
+ __u64 cpl : 2;
+ __u64 is : 1;
+ __u64 mc : 1;
+ __u64 it : 1;
+ __u64 id : 1;
+ __u64 da : 1;
+ __u64 dd : 1;
+ __u64 ss : 1;
+ __u64 ri : 2;
+ __u64 ed : 1;
+ __u64 bn : 1;
+ __u64 reserved4 : 19;
+};
+
+union ia64_isr {
+ __u64 val;
+ struct {
+ __u64 code : 16;
+ __u64 vector : 8;
+ __u64 reserved1 : 8;
+ __u64 x : 1;
+ __u64 w : 1;
+ __u64 r : 1;
+ __u64 na : 1;
+ __u64 sp : 1;
+ __u64 rs : 1;
+ __u64 ir : 1;
+ __u64 ni : 1;
+ __u64 so : 1;
+ __u64 ei : 2;
+ __u64 ed : 1;
+ __u64 reserved2 : 20;
+ };
+};
+
+union ia64_lid {
+ __u64 val;
+ struct {
+ __u64 rv : 16;
+ __u64 eid : 8;
+ __u64 id : 8;
+ __u64 ig : 32;
+ };
+};
+
+union ia64_tpr {
+ __u64 val;
+ struct {
+ __u64 ig0 : 4;
+ __u64 mic : 4;
+ __u64 rsv : 8;
+ __u64 mmi : 1;
+ __u64 ig1 : 47;
+ };
+};
+
+union ia64_itir {
+ __u64 val;
+ struct {
+ __u64 rv3 : 2; /* 0-1 */
+ __u64 ps : 6; /* 2-7 */
+ __u64 key : 24; /* 8-31 */
+ __u64 rv4 : 32; /* 32-63 */
+ };
+};
+
+union ia64_rr {
+ __u64 val;
+ struct {
+ __u64 ve : 1; /* enable hw walker */
+ __u64 reserved0: 1; /* reserved */
+ __u64 ps : 6; /* log page size */
+ __u64 rid : 24; /* region id */
+ __u64 reserved1: 32; /* reserved */
+ };
+};
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state. Frequently used
+ * state comes earlier:
+ */
+struct cpuinfo_ia64 {
+ unsigned int softirq_pending;
+ unsigned long itm_delta; /* # of clock cycles between clock ticks */
+ unsigned long itm_next; /* interval timer mask value to use for next clock tick */
+ unsigned long nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+ unsigned long unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
+ unsigned long unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
+ unsigned long itc_freq; /* frequency of ITC counter */
+ unsigned long proc_freq; /* frequency of processor */
+ unsigned long cyc_per_usec; /* itc_freq/1000000 */
+ unsigned long ptce_base;
+ unsigned int ptce_count[2];
+ unsigned int ptce_stride[2];
+ struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
+
+#ifdef CONFIG_SMP
+ unsigned long loops_per_jiffy;
+ int cpu;
+ unsigned int socket_id; /* physical processor socket id */
+ unsigned short core_id; /* core id */
+ unsigned short thread_id; /* thread id */
+ unsigned short num_log; /* Total number of logical processors on
+ * this socket that were successfully booted */
+ unsigned char cores_per_socket; /* Cores per processor socket */
+ unsigned char threads_per_core; /* Threads per core */
+#endif
+
+ /* CPUID-derived information: */
+ unsigned long ppn;
+ unsigned long features;
+ unsigned char number;
+ unsigned char revision;
+ unsigned char model;
+ unsigned char family;
+ unsigned char archrev;
+ char vendor[16];
+ char *model_name;
+
+#ifdef CONFIG_NUMA
+ struct ia64_node_data *node_data;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
+
+/*
+ * The "local" data variable. It refers to the per-CPU data of the currently executing
+ * CPU, much like "current" points to the per-task data of the currently executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
+ */
+#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))
+
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define SET_UNALIGN_CTL(task,value) \
+({ \
+ (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
+ | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
+ 0; \
+})
+#define GET_UNALIGN_CTL(task,addr) \
+({ \
+ put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
+ (int __user *) (addr)); \
+})
+
+#define SET_FPEMU_CTL(task,value) \
+({ \
+ (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \
+ | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
+ 0; \
+})
+#define GET_FPEMU_CTL(task,addr) \
+({ \
+ put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
+ (int __user *) (addr)); \
+})
+
+struct thread_struct {
+ __u32 flags; /* various thread flags (see IA64_THREAD_*) */
+ /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+ __u8 on_ustack; /* executing on user-stacks? */
+ __u8 pad[3];
+ __u64 ksp; /* kernel stack pointer */
+ __u64 map_base; /* base address for get_unmapped_area() */
+ __u64 rbs_bot; /* the base address for the RBS */
+ int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
+ unsigned long dbr[IA64_NUM_DBG_REGS];
+ unsigned long ibr[IA64_NUM_DBG_REGS];
+ struct ia64_fpreg fph[96]; /* saved/loaded on demand */
+};
+
+#define INIT_THREAD { \
+ .flags = 0, \
+ .on_ustack = 0, \
+ .ksp = 0, \
+ .map_base = DEFAULT_MAP_BASE, \
+ .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
+ .last_fph_cpu = -1, \
+ .dbr = {0, }, \
+ .ibr = {0, }, \
+ .fph = {{{{0}}}, } \
+}
+
+#define start_thread(regs,new_ip,new_sp) do { \
+ regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \
+ & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
+ regs->cr_iip = new_ip; \
+ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
+ regs->ar_rnat = 0; \
+ regs->ar_bspstore = current->thread.rbs_bot; \
+ regs->ar_fpsr = FPSR_DEFAULT; \
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+ */ \
+ regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
+ regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
+ } \
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait().
+ */
+#define release_thread(dead_task)
+
+/* Get wait channel for task P. */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK. */
+#define KSTK_EIP(tsk) \
+ ({ \
+ struct pt_regs *_regs = task_pt_regs(tsk); \
+ _regs->cr_iip + ia64_psr(_regs)->ri; \
+ })
+
+/* Return stack pointer of blocked task TSK. */
+#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
+
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum) \
+({ \
+ unsigned long r = 0; \
+ \
+ switch (regnum) { \
+ case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
+ case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
+ case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
+ case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
+ case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
+ case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
+ case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
+ case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
+ default: ia64_getreg_unknown_kr(); break; \
+ } \
+ r; \
+})
+
+#define ia64_set_kr(regnum, r) \
+({ \
+ switch (regnum) { \
+ case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
+ case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
+ case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
+ case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
+ case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
+ case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
+ case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
+ case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
+ default: ia64_setreg_unknown_kr(); break; \
+ } \
+})
+
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
+
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_is_local_fpu_owner(t) \
+({ \
+ struct task_struct *__ia64_islfo_task = (t); \
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+})
+
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_set_local_fpu_owner(t) do { \
+ struct task_struct *__ia64_slfo_task = (t); \
+ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
+ ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs. */
+#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+ ia64_fph_enable();
+ __ia64_init_fpu();
+ ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_save_fpu(fph);
+ ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_load_fpu(fph);
+ ia64_fph_disable();
+}
+
+static inline __u64
+ia64_clear_ic (void)
+{
+ __u64 psr;
+ psr = ia64_getreg(_IA64_REG_PSR);
+ ia64_stop();
+ ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+ ia64_srlz_i();
+ return psr;
+}
+
+/*
+ * Restore the psr.
+ */
+static inline void
+ia64_set_psr (__u64 psr)
+{
+ ia64_stop();
+ ia64_setreg(_IA64_REG_PSR_L, psr);
+ ia64_srlz_i();
+}
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+static inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+ __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
+ if (target_mask & 0x1)
+ ia64_itri(tr_num, pte);
+ if (target_mask & 0x2)
+ ia64_itrd(tr_num, pte);
+}
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
+ /* as per EAS2.6, itc must be the last instruction in an instruction group */
+ if (target_mask & 0x1)
+ ia64_itci(pte);
+ if (target_mask & 0x2)
+ ia64_itcd(pte);
+}
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+static inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+ if (target_mask & 0x1)
+ ia64_ptri(vmaddr, (log_size << 2));
+ if (target_mask & 0x2)
+ ia64_ptrd(vmaddr, (log_size << 2));
+}
+
+/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
+static inline void
+ia64_set_iva (void *ivt_addr)
+{
+ ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+ ia64_srlz_i();
+}
+
+/* Set the page table address and control bits. */
+static inline void
+ia64_set_pta (__u64 pta)
+{
+ /* Note: srlz.i implies srlz.d */
+ ia64_setreg(_IA64_REG_CR_PTA, pta);
+ ia64_srlz_i();
+}
+
+static inline void
+ia64_eoi (void)
+{
+ ia64_setreg(_IA64_REG_CR_EOI, 0);
+ ia64_srlz_d();
+}
+
+#define cpu_relax() ia64_hint(ia64_hint_pause)
+
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+ unsigned int reg = vector / 64;
+ unsigned int bit = vector % 64;
+ unsigned long irr;
+
+ switch (reg) {
+ case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+ case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+ case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+ case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+ }
+
+ return test_bit(bit, &irr);
+}
+
+static inline void
+ia64_set_lrr0 (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_LRR0, val);
+ ia64_srlz_d();
+}
+
+static inline void
+ia64_set_lrr1 (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_LRR1, val);
+ ia64_srlz_d();
+}
+
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+static inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+ return ((__u64) spill_addr >> 3) & 0x3f;
+}
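+
+/*
+ * Example: a register spilled at an address ending in 0x38 maps to unat
+ * bit 7 (0x38 >> 3 == 7); the 0x3f mask wraps the position within the 64
+ * bits of ar.unat, i.e. within a 512-byte-aligned spill area.
+ */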
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR. UNAT is the mask to be updated.
+ */
+static inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+ __u64 bit = ia64_unat_pos(spill_addr);
+ __u64 mask = 1UL << bit;
+
+ *unat = (*unat & ~mask) | (nat << bit);
+}
+
+static inline __u64
+ia64_get_ivr (void)
+{
+ __u64 r;
+ ia64_srlz_d();
+ r = ia64_getreg(_IA64_REG_CR_IVR);
+ ia64_srlz_d();
+ return r;
+}
+
+static inline void
+ia64_set_dbr (__u64 regnum, __u64 value)
+{
+ __ia64_set_dbr(regnum, value);
+#ifdef CONFIG_ITANIUM
+ ia64_srlz_d();
+#endif
+}
+
+static inline __u64
+ia64_get_dbr (__u64 regnum)
+{
+ __u64 retval;
+
+ retval = __ia64_get_dbr(regnum);
+#ifdef CONFIG_ITANIUM
+ ia64_srlz_d();
+#endif
+ return retval;
+}
+
+static inline __u64
+ia64_rotr (__u64 w, __u64 n)
+{
+ return (w >> n) | (w << (64 - n));
+}
+
+#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
+
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+ void *result;
+ result = (void *) ia64_tpa(addr);
+ return __va(result);
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define PREFETCH_STRIDE L1_CACHE_BYTES
+
+static inline void
+prefetch (const void *x)
+{
+ ia64_lfetch(ia64_lfhint_none, x);
+}
+
+static inline void
+prefetchw (const void *x)
+{
+ ia64_lfetch_excl(ia64_lfhint_none, x);
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+extern unsigned long boot_option_idle_override;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+ IDLE_NOMWAIT, IDLE_POLL};
+
+void default_idle(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
new file mode 100644
index 000000000..081791359
--- /dev/null
+++ b/arch/ia64/include/asm/ptrace.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Arun Sharma <arun.sharma@intel.com>
+ *
+ * 12/07/98 S. Eranian added pt_regs & switch_stack
+ * 12/21/98 D. Mosberger updated to match latest code
+ * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
+ *
+ */
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER 3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER 2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER 1
+#else
+# define KERNEL_STACK_SIZE_ORDER 0
+#endif
+
+#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
+#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE IA64_STK_OFFSET
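+
+/*
+ * In other words, the task structure plus kernel stack occupies 32KB
+ * whether 4KB, 8KB, or 16KB pages are selected (8, 4 or 2 pages
+ * respectively), and 64KB (one page, order 0) with 64KB pages.
+ */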
+
+#ifndef __ASSEMBLY__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * We use the ia64_psr(regs)->ri to determine which of the three
+ * instructions in the bundle (16 bytes) took the sample. Generate
+ * the canonical representation by adding it to the instruction pointer.
+ */
+# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->r12;
+}
+
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ return regs->r10 != -1;
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ if (is_syscall_success(regs))
+ return regs->r8;
+ else
+ return -regs->r8;
+}
+
+/* Conserve space in histogram by encoding slot bits in address
+ * bits 2 and 3 rather than bits 0 and 1.
+ */
+#define profile_pc(regs) \
+({ \
+ unsigned long __ip = instruction_pointer(regs); \
+ (__ip & ~3UL) + ((__ip & 3UL) << 2); \
+})
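+
+/*
+ * instruction_pointer() adds the slot number (0-2) to the 16-byte
+ * aligned bundle address, so bits 0-1 of __ip hold the slot; moving them
+ * to bits 2-3 makes each slot look like a distinct 4-byte-aligned
+ * address to the profiler.
+ */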
+
+ /* given a pointer to a task_struct, return the user's pt_regs */
+# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
+# define fsys_mode(task,regs) \
+ ({ \
+ struct task_struct *_task = (task); \
+ struct pt_regs *_regs = (regs); \
+ !user_mode(_regs) && user_stack(_task, _regs); \
+ })
+
+ /*
+ * System call handlers that, upon successful completion, need to return a negative value
+ * should call force_successful_syscall_return() right before returning. On architectures
+ * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+ * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+ * flag will not get set. On architectures which do not support a separate error flag,
+ * the macro is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+ * or something along those lines).
+ *
+ * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+ */
+# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
+
+ struct task_struct; /* forward decl */
+ struct unw_frame_info; /* forward decl */
+
+ extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+ unsigned long *);
+ extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+ unsigned long, long *);
+ extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+ unsigned long, long);
+ extern void ia64_flush_fph (struct task_struct *);
+ extern void ia64_sync_fph (struct task_struct *);
+ extern void ia64_sync_krbs(void);
+ extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+ unsigned long, unsigned long);
+
+ /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
+ extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
+ /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
+ extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
+
+ extern void ia64_increment_ip (struct pt_regs *pt);
+ extern void ia64_decrement_ip (struct pt_regs *pt);
+
+ extern void ia64_ptrace_stop(void);
+ #define arch_ptrace_stop(code, info) \
+ ia64_ptrace_stop()
+ #define arch_ptrace_stop_needed(code, info) \
+ (!test_thread_flag(TIF_RESTORE_RSE))
+
+ extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+ #define arch_ptrace_attach(child) \
+ ptrace_attach_sync_user_rbs(child)
+
+ #define arch_has_single_step() (1)
+ #define arch_has_block_step() (1)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_PTRACE_H */
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
new file mode 100644
index 000000000..08f5b6aae
--- /dev/null
+++ b/arch/ia64/include/asm/sal.h
@@ -0,0 +1,919 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ *
+ * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
+ * revision of the SAL spec.
+ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
+ * revision of the SAL spec.
+ * 99/09/29 davidm Updated for SAL 2.6.
+ * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6)
+ * (plus examples of platform error info structures from smariset @ Intel)
+ */
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bcd.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+
+#include <asm/pal.h>
+#include <asm/fpu.h>
+
+extern unsigned long sal_systab_phys;
+extern spinlock_t sal_lock;
+
+/* SAL spec _requires_ eight args for each call. */
+#define __IA64_FW_CALL(entry,result,a0,a1,a2,a3,a4,a5,a6,a7) \
+ result = (*entry)(a0,a1,a2,a3,a4,a5,a6,a7)
+
+# define IA64_FW_CALL(entry,result,args...) do { \
+ unsigned long __ia64_sc_flags; \
+ struct ia64_fpreg __ia64_sc_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_sc_fr); \
+ spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
+ __IA64_FW_CALL(entry, result, args); \
+ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
+ ia64_load_scratch_fpregs(__ia64_sc_fr); \
+} while (0)
+
+# define SAL_CALL(result,args...) \
+ IA64_FW_CALL(ia64_sal, result, args);
+
+# define SAL_CALL_NOLOCK(result,args...) do { \
+ unsigned long __ia64_scn_flags; \
+ struct ia64_fpreg __ia64_scn_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_scn_fr); \
+ local_irq_save(__ia64_scn_flags); \
+ __IA64_FW_CALL(ia64_sal, result, args); \
+ local_irq_restore(__ia64_scn_flags); \
+ ia64_load_scratch_fpregs(__ia64_scn_fr); \
+} while (0)
+
+# define SAL_CALL_REENTRANT(result,args...) do { \
+ struct ia64_fpreg __ia64_scs_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_scs_fr); \
+ preempt_disable(); \
+ __IA64_FW_CALL(ia64_sal, result, args); \
+ preempt_enable(); \
+ ia64_load_scratch_fpregs(__ia64_scs_fr); \
+} while (0)
+
+#define SAL_SET_VECTORS 0x01000000
+#define SAL_GET_STATE_INFO 0x01000001
+#define SAL_GET_STATE_INFO_SIZE 0x01000002
+#define SAL_CLEAR_STATE_INFO 0x01000003
+#define SAL_MC_RENDEZ 0x01000004
+#define SAL_MC_SET_PARAMS 0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
+
+#define SAL_CACHE_FLUSH 0x01000008
+#define SAL_CACHE_INIT 0x01000009
+#define SAL_PCI_CONFIG_READ 0x01000010
+#define SAL_PCI_CONFIG_WRITE 0x01000011
+#define SAL_FREQ_BASE 0x01000012
+#define SAL_PHYSICAL_ID_INFO 0x01000013
+
+#define SAL_UPDATE_PAL 0x01000020
+
+struct ia64_sal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates the reason the call failed.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ long status;
+ unsigned long v0;
+ unsigned long v1;
+ unsigned long v2;
+};
+
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+ SAL_FREQ_BASE_PLATFORM = 0,
+ SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+ SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors. The structure of these descriptors follows
+ * below.
+ * The definition follows the SAL specs from July 2000.
+ */
+struct ia64_sal_systab {
+ u8 signature[4]; /* should be "SST_" */
+ u32 size; /* size of this table in bytes */
+ u8 sal_rev_minor;
+ u8 sal_rev_major;
+ u16 entry_count; /* # of entries in variable portion */
+ u8 checksum;
+ u8 reserved1[7];
+ u8 sal_a_rev_minor;
+ u8 sal_a_rev_major;
+ u8 sal_b_rev_minor;
+ u8 sal_b_rev_major;
+ /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+ u8 oem_id[32];
+ u8 product_id[32]; /* ASCII product id */
+ u8 reserved2[8];
+};
+
+enum sal_systab_entry_type {
+ SAL_DESC_ENTRY_POINT = 0,
+ SAL_DESC_MEMORY = 1,
+ SAL_DESC_PLATFORM_FEATURE = 2,
+ SAL_DESC_TR = 3,
+ SAL_DESC_PTC = 4,
+ SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type: Size:
+ * 0 48
+ * 1 32
+ * 2 16
+ * 3 32
+ * 4 16
+ * 5 16
+ */
+#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]
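+
+/*
+ * The string above is simply the size table encoded as octal escapes
+ * ('\060' == 48, '\040' == 32, '\020' == 16), indexed by entry type.
+ */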
+
+typedef struct ia64_sal_desc_entry_point {
+ u8 type;
+ u8 reserved1[7];
+ u64 pal_proc;
+ u64 sal_proc;
+ u64 gp;
+ u8 reserved2[16];
+}ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+ u8 type;
+ u8 used_by_sal; /* needs to be mapped for SAL? */
+ u8 mem_attr; /* current memory attribute setting */
+ u8 access_rights; /* access rights set up by SAL */
+ u8 mem_attr_mask; /* mask of supported memory attributes */
+ u8 reserved1;
+ u8 mem_type; /* memory type */
+ u8 mem_usage; /* memory usage */
+ u64 addr; /* physical address of memory */
+ u32 length; /* length (multiple of 4KB pages) */
+ u32 reserved2;
+ u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
+
+typedef struct ia64_sal_desc_platform_feature {
+ u8 type;
+ u8 feature_mask;
+ u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+ u8 type;
+ u8 tr_type; /* 0 == instruction, 1 == data */
+ u8 regnum; /* translation register number */
+ u8 reserved1[5];
+ u64 addr; /* virtual address of area covered */
+ u64 page_size; /* encoded page size */
+ u8 reserved2[8];
+} ia64_sal_desc_tr_t;
+
+typedef struct ia64_sal_desc_ptc {
+ u8 type;
+ u8 reserved1[3];
+ u32 num_domains; /* # of coherence domains */
+ u64 domain_info; /* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+ u64 proc_count; /* number of processors in domain */
+ u64 proc_list; /* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+ u64 id : 8; /* id of processor */
+ u64 eid : 8; /* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
+
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+typedef struct ia64_sal_desc_ap_wakeup {
+ u8 type;
+ u8 mechanism; /* 0 == external interrupt */
+ u8 reserved1[6];
+ u64 vector; /* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t ;
+
+extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
+
+extern unsigned short sal_revision; /* supported SAL spec revision */
+extern unsigned short sal_version; /* SAL version; OEM dependent */
+#define SAL_VERSION_CODE(major, minor) ((bin2bcd(major) << 8) | bin2bcd(minor))
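+
+/*
+ * Example: SAL_VERSION_CODE(3, 20) == 0x0320 -- major and minor are
+ * BCD-encoded into the high and low bytes respectively.
+ */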
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+ SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
+ SAL_INFO_TYPE_INIT = 1, /* Init information */
+ SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */
+ SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+ SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
+ SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
+ SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+ SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
+ SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+ SAL_VECTOR_OS_MCA = 0,
+ SAL_VECTOR_OS_INIT = 1,
+ SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
+#define SAL_MC_PARAM_RZ_ALWAYS 0x1
+#define SAL_MC_PARAM_BINIT_ESCALATE 0x10
+
+/*
+ * Definition of the SAL Error Log from the SAL spec
+ */
+
+/* SAL Error Record Section GUID Definitions */
+#define SAL_PROC_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_BUS_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+ EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+ 0xca, 0x4d)
+
+#define MAX_CACHE_ERRORS 6
+#define MAX_TLB_ERRORS 6
+#define MAX_BUS_ERRORS 1
+
+/* Definition of version according to SAL spec for logging purposes */
+typedef struct sal_log_revision {
+ u8 minor; /* BCD (0..99) */
+ u8 major; /* BCD (0..99) */
+} sal_log_revision_t;
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+typedef struct sal_log_timestamp {
+ u8 slh_second; /* Second (0..59) */
+ u8 slh_minute; /* Minute (0..59) */
+ u8 slh_hour; /* Hour (0..23) */
+ u8 slh_reserved;
+ u8 slh_day; /* Day (1..31) */
+ u8 slh_month; /* Month (1..12) */
+ u8 slh_year; /* Year (00..99) */
+ u8 slh_century; /* Century (19, 20, 21, ...) */
+} sal_log_timestamp_t;
+
+/* Definition of log record header structures */
+typedef struct sal_log_record_header {
+ u64 id; /* Unique monotonically increasing ID */
+ sal_log_revision_t revision; /* Major and Minor revision of header */
+ u8 severity; /* Error Severity */
+ u8 validation_bits; /* 0: platform_guid, 1: !timestamp */
+ u32 len; /* Length of this error log in bytes */
+ sal_log_timestamp_t timestamp; /* Timestamp */
+ efi_guid_t platform_guid; /* Unique OEM Platform ID */
+} sal_log_record_header_t;
+
+#define sal_log_severity_recoverable 0
+#define sal_log_severity_fatal 1
+#define sal_log_severity_corrected 2
+
+/*
+ * Error Recovery Info (ERI) bit decode. From SAL Spec section B.2.2 Table B-3
+ * Error Section Error_Recovery_Info Field Definition.
+ */
+#define ERI_NOT_VALID 0x0 /* Error Recovery Field is not valid */
+#define ERI_NOT_ACCESSIBLE 0x30 /* Resource not accessible */
+#define ERI_CONTAINMENT_WARN 0x22 /* Corrupt data propagated */
+#define ERI_UNCORRECTED_ERROR 0x20 /* Uncorrected error */
+#define ERI_COMPONENT_RESET 0x24 /* Component must be reset */
+#define ERI_CORR_ERROR_LOG 0x21 /* Corrected error, needs logging */
+#define ERI_CORR_ERROR_THRESH 0x29 /* Corrected error threshold exceeded */
+
+/* Definition of log section header structures */
+typedef struct sal_log_sec_header {
+ efi_guid_t guid; /* Unique Section ID */
+ sal_log_revision_t revision; /* Major and Minor revision of Section */
+ u8 error_recovery_info; /* Platform error recovery status */
+ u8 reserved;
+ u32 len; /* Section length */
+} sal_log_section_hdr_t;
+
+typedef struct sal_log_mod_error_info {
+ struct {
+ u64 check_info : 1,
+ requestor_identifier : 1,
+ responder_identifier : 1,
+ target_identifier : 1,
+ precise_ip : 1,
+ reserved : 59;
+ } valid;
+ u64 check_info;
+ u64 requestor_identifier;
+ u64 responder_identifier;
+ u64 target_identifier;
+ u64 precise_ip;
+} sal_log_mod_error_info_t;
+
+typedef struct sal_processor_static_info {
+ struct {
+ u64 minstate : 1,
+ br : 1,
+ cr : 1,
+ ar : 1,
+ rr : 1,
+ fr : 1,
+ reserved : 58;
+ } valid;
+ pal_min_state_area_t min_state_area;
+ u64 br[8];
+ u64 cr[128];
+ u64 ar[128];
+ u64 rr[8];
+ struct ia64_fpreg __attribute__ ((packed)) fr[128];
+} sal_processor_static_info_t;
+
+struct sal_cpuid_info {
+ u64 regs[5];
+ u64 reserved;
+};
+
+typedef struct sal_log_processor_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 proc_error_map : 1,
+ proc_state_param : 1,
+ proc_cr_lid : 1,
+ psi_static_struct : 1,
+ num_cache_check : 4,
+ num_tlb_check : 4,
+ num_bus_check : 4,
+ num_reg_file_check : 4,
+ num_ms_check : 4,
+ cpuid_info : 1,
+ reserved1 : 39;
+ } valid;
+ u64 proc_error_map;
+ u64 proc_state_parameter;
+ u64 proc_cr_lid;
+ /*
+ * The rest of this structure consists of variable-length arrays, which can't be
+ * expressed in C.
+ */
+ sal_log_mod_error_info_t info[0];
+ /*
+ * This is what the rest looked like if C supported variable-length arrays:
+ *
+ * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
+ * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
+ * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
+ * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
+ * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
+ * struct sal_cpuid_info cpuid_info;
+ * sal_processor_static_info_t processor_static_info;
+ */
+} sal_log_processor_info_t;
+
+/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
+#define SAL_LPI_PSI_INFO(l) \
+({ sal_log_processor_info_t *_l = (l); \
+ ((sal_processor_static_info_t *) \
+ ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check \
+ + _l->valid.num_bus_check + _l->valid.num_reg_file_check \
+ + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t) \
+ + sizeof(struct sal_cpuid_info)))); \
+})
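+
+/*
+ * Illustrative sketch (not part of the original header): a consumer of a
+ * processor error section would typically walk the variable-length info[]
+ * array using the counts in the valid bit-field and then reach the static
+ * processor state via SAL_LPI_PSI_INFO().  use_check_info() and
+ * use_min_state() below are hypothetical helpers:
+ *
+ *	void walk_proc_err (sal_log_processor_info_t *lp)
+ *	{
+ *		unsigned int i, n = lp->valid.num_cache_check + lp->valid.num_tlb_check
+ *				    + lp->valid.num_bus_check + lp->valid.num_reg_file_check
+ *				    + lp->valid.num_ms_check;
+ *		sal_processor_static_info_t *psi = SAL_LPI_PSI_INFO(lp);
+ *
+ *		for (i = 0; i < n; ++i)
+ *			if (lp->info[i].valid.check_info)
+ *				use_check_info(lp->info[i].check_info);
+ *		if (lp->valid.psi_static_struct && psi->valid.minstate)
+ *			use_min_state(&psi->min_state_area);
+ *	}
+ */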
+
+/* platform error log structures */
+
+typedef struct sal_log_mem_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 error_status : 1,
+ physical_addr : 1,
+ addr_mask : 1,
+ node : 1,
+ card : 1,
+ module : 1,
+ bank : 1,
+ device : 1,
+ row : 1,
+ column : 1,
+ bit_position : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_id : 1,
+ oem_data : 1,
+ reserved : 47;
+ } valid;
+ u64 error_status;
+ u64 physical_addr;
+ u64 addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_position;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_id[16];
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_mem_dev_err_info_t;
+
+typedef struct sal_log_sel_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 record_id : 1,
+ record_type : 1,
+ generator_id : 1,
+ evm_rev : 1,
+ sensor_type : 1,
+ sensor_num : 1,
+ event_dir : 1,
+ event_data1 : 1,
+ event_data2 : 1,
+ event_data3 : 1,
+ reserved : 54;
+ } valid;
+ u16 record_id;
+ u8 record_type;
+ u8 timestamp[4];
+ u16 generator_id;
+ u8 evm_rev;
+ u8 sensor_type;
+ u8 sensor_num;
+ u8 event_dir;
+ u8 event_data1;
+ u8 event_data2;
+ u8 event_data3;
+} sal_log_sel_dev_err_info_t;
+
+typedef struct sal_log_pci_bus_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ err_type : 1,
+ bus_id : 1,
+ bus_address : 1,
+ bus_data : 1,
+ bus_cmd : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ oem_data : 1,
+ reserved : 54;
+ } valid;
+ u64 err_status;
+ u16 err_type;
+ u16 bus_id;
+ u32 reserved;
+ u64 bus_address;
+ u64 bus_data;
+ u64 bus_cmd;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_pci_bus_err_info_t;
+
+typedef struct sal_log_smbios_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 event_type : 1,
+ length : 1,
+ time_stamp : 1,
+ data : 1,
+ reserved1 : 60;
+ } valid;
+ u8 event_type;
+ u8 length;
+ u8 time_stamp[6];
+ u8 data[1]; /* data of variable length, length == slsmb_length */
+} sal_log_smbios_dev_err_info_t;
+
+typedef struct sal_log_pci_comp_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ comp_info : 1,
+ num_mem_regs : 1,
+ num_io_regs : 1,
+ reg_data_pairs : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u8 func_num;
+ u8 dev_num;
+ u8 bus_num;
+ u8 seg_num;
+ u8 reserved[5];
+ } comp_info;
+ u32 num_mem_regs;
+ u32 num_io_regs;
+ u64 reg_data_pairs[1];
+ /*
+ * array of address/data register pairs is num_mem_regs + num_io_regs elements
+ * long. Each array element consists of a u64 address followed by a u64 data
+ * value. The oem_data array immediately follows the reg_data_pairs array
+ */
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_pci_comp_err_info_t;
+
+typedef struct sal_log_plat_specific_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ guid : 1,
+ oem_data : 1,
+ reserved : 61;
+ } valid;
+ u64 err_status;
+ efi_guid_t guid;
+ u8 oem_data[1]; /* platform specific variable length data */
+} sal_log_plat_specific_err_info_t;
+
+typedef struct sal_log_host_ctlr_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_data[1]; /* Variable length OEM data */
+} sal_log_host_ctlr_err_info_t;
+
+typedef struct sal_log_plat_bus_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_data[1]; /* Variable length OEM data */
+} sal_log_plat_bus_err_info_t;
+
+/* Overall platform error section structure */
+typedef union sal_log_platform_err_info {
+ sal_log_mem_dev_err_info_t mem_dev_err;
+ sal_log_sel_dev_err_info_t sel_dev_err;
+ sal_log_pci_bus_err_info_t pci_bus_err;
+ sal_log_smbios_dev_err_info_t smbios_dev_err;
+ sal_log_pci_comp_err_info_t pci_comp_err;
+ sal_log_plat_specific_err_info_t plat_specific_err;
+ sal_log_host_ctlr_err_info_t host_ctlr_err;
+ sal_log_plat_bus_err_info_t plat_bus_err;
+} sal_log_platform_err_info_t;
+
+/* SAL log over-all, multi-section error record structure (processor+platform) */
+typedef struct err_rec {
+ sal_log_record_header_t sal_elog_header;
+ sal_log_processor_info_t proc_err;
+ sal_log_platform_err_info_t plat_err;
+ u8 oem_data_pad[1024];
+} ia64_err_rec_t;
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+
+extern s64 ia64_sal_cache_flush (u64 cache_type);
+extern void __init check_sal_cache_flush (void);
+
+/* Initialize all the processor and platform level instruction and data caches */
+static inline s64
+ia64_sal_cache_init (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Clear the processor and platform information logged by SAL with respect to the machine
+ * state at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline s64
+ia64_sal_clear_state_info (u64 sal_info_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+ 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+ sal_info, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+
+ return isrv.v0;
+}
+
+/*
+ * Get the maximum size of the information logged by SAL with respect to the machine state
+ * at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+ 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
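+
+/*
+ * Typical calling sequence (illustrative sketch only; sal_buf is a
+ * hypothetical buffer large enough for the record, and SAL_INFO_TYPE_MCA is
+ * one of the record types defined earlier in this header):
+ *
+ *	u64 size = ia64_sal_get_state_info_size(SAL_INFO_TYPE_MCA);
+ *
+ *	if (size && ia64_sal_get_state_info(SAL_INFO_TYPE_MCA, sal_buf))
+ *		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+ */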
+
+/*
+ * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
+ * the monarch processor. Must not lock, because it will not return on any cpu until the
+ * monarch processor sends a wake up.
+ */
+static inline s64
+ia64_sal_mc_rendez (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Allow the OS to specify the interrupt number to be used by SAL to interrupt the OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ * Returns the complete ia64_sal_retval because some calls return more than just a status
+ * value.
+ */
+static inline struct ia64_sal_retval
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
+ timeout, rz_always, 0, 0);
+ return isrv;
+}
+
+/* Read from PCI configuration space */
+static inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
+ if (value)
+ *value = isrv.v0;
+ return isrv.status;
+}
+
+/* Write to PCI configuration space */
+static inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+ type, 0, 0, 0);
+ return isrv.status;
+}
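+
+/*
+ * Illustrative sketch: read the 32-bit vendor/device ID word at config
+ * offset 0 of a device.  pci_addr is a hypothetical SAL config-space
+ * address composed by the platform PCI code, and type 0 selects a type 0
+ * configuration cycle:
+ *
+ *	u64 id;
+ *
+ *	if (ia64_sal_pci_config_read(pci_addr, 0, 4, &id) == 0)
+ *		printk("vendor 0x%llx device 0x%llx\n",
+ *		       id & 0xffff, (id >> 16) & 0xffff);
+ */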
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL procedures are invoked
+ * in virtual mode.
+ */
+static inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+ 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Register software dependent code locations within SAL. These locations are handlers or
+ * entry points where SAL will pass control for the specified event. These event handlers
+ * are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+static inline s64
+ia64_sal_set_vectors (u64 vector_type,
+ u64 handler_addr1, u64 gp1, u64 handler_len1,
+ u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+ handler_addr1, gp1, handler_len1,
+ handler_addr2, gp2, handler_len2);
+
+ return isrv.status;
+}
+
+/* Update the contents of PAL block in the non-volatile storage device */
+static inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+ u64 *error_code, u64 *scratch_buf_size_needed)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+ 0, 0, 0, 0);
+ if (error_code)
+ *error_code = isrv.v0;
+ if (scratch_buf_size_needed)
+ *scratch_buf_size_needed = isrv.v1;
+ return isrv.status;
+}
+
+/* Get physical processor die mapping in the platform. */
+static inline s64
+ia64_sal_physical_id_info(u16 *splid)
+{
+ struct ia64_sal_retval isrv;
+
+ if (sal_revision < SAL_VERSION_CODE(3,2))
+ return -1;
+
+ SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
+ if (splid)
+ *splid = isrv.v0;
+ return isrv.status;
+}
+
+extern unsigned long sal_platform_features;
+
+extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
+
+struct sal_ret_values {
+ long r8; long r9; long r10; long r11;
+};
+
+#define IA64_SAL_OEMFUNC_MIN 0x02000000
+#define IA64_SAL_OEMFUNC_MAX 0x03ffffff
+
+extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
+ u64, u64, u64);
+extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
+ u64, u64, u64, u64, u64);
+extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
+ u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+ u64 rr[8]; /* Region Registers */
+ u64 br[6]; /* br0:
+ * return addr into SAL boot rendez routine */
+ u64 gr1; /* SAL:GP */
+ u64 gr12; /* SAL:SP */
+ u64 gr13; /* SAL: Task Pointer */
+ u64 fpsr;
+ u64 pfs;
+ u64 rnat;
+ u64 unat;
+ u64 bspstore;
+ u64 dcr; /* Default Control Register */
+ u64 iva;
+ u64 pta;
+ u64 itv;
+ u64 pmv;
+ u64 cmcv;
+ u64 lrr[2];
+ u64 gr[4];
+ u64 pr; /* Predicate registers */
+ u64 lc; /* Loop Count */
+ struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
+
+extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+
+#define PALO_MAX_TLB_PURGES 0xFFFF
+#define PALO_SIG "PALO"
+
+struct palo_table {
+ u8 signature[4]; /* Should be "PALO" */
+ u32 length;
+ u8 minor_revision;
+ u8 major_revision;
+ u8 checksum;
+ u8 reserved1[5];
+ u16 max_tlb_purges;
+ u8 reserved2[6];
+};
+
+#define NPTCG_FROM_PAL 0
+#define NPTCG_FROM_PALO 1
+#define NPTCG_FROM_KERNEL_PARAMETER 2
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SAL_H */
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
new file mode 100644
index 000000000..3a033d200
--- /dev/null
+++ b/arch/ia64/include/asm/sections.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SECTIONS_H
+#define _ASM_IA64_SECTIONS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/elf.h>
+#include <linux/uaccess.h>
+#include <asm-generic/sections.h>
+
+extern char __phys_per_cpu_start[];
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
+extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___rse_patchlist[], __end___rse_patchlist[];
+extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
+extern char __start_gate_section[];
+extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
+extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
+extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
+extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
+extern char __start_unwind[], __end_unwind[];
+extern char __start_ivt_text[], __end_ivt_text[];
+
+#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
+
+#undef dereference_function_descriptor
+static inline void *dereference_function_descriptor(void *ptr)
+{
+ struct fdesc *desc = ptr;
+ void *p;
+
+ if (!get_kernel_nofault(p, (void *)&desc->ip))
+ ptr = p;
+ return ptr;
+}
+
+#undef dereference_kernel_function_descriptor
+static inline void *dereference_kernel_function_descriptor(void *ptr)
+{
+ if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
+ return ptr;
+ return dereference_function_descriptor(ptr);
+}
+
+#endif /* _ASM_IA64_SECTIONS_H */
diff --git a/arch/ia64/include/asm/serial.h b/arch/ia64/include/asm/serial.h
new file mode 100644
index 000000000..068be1158
--- /dev/null
+++ b/arch/ia64/include/asm/serial.h
@@ -0,0 +1,17 @@
+/*
+ * Derived from the i386 version.
+ */
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/*
+ * All legacy serial ports should be enumerated via ACPI namespace, so
+ * we need not list them here.
+ */
diff --git a/arch/ia64/include/asm/shmparam.h b/arch/ia64/include/asm/shmparam.h
new file mode 100644
index 000000000..43bd8324a
--- /dev/null
+++ b/arch/ia64/include/asm/shmparam.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SHMPARAM_H
+#define _ASM_IA64_SHMPARAM_H
+
+/*
+ * SHMLBA controls minimum alignment at which shared memory segments
+ * get attached. The IA-64 architecture says that there may be a
+ * performance degradation when there are virtual aliases within 1MB.
+ * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
+ */
+#define SHMLBA (1024*1024)
+
+#endif /* _ASM_IA64_SHMPARAM_H */
diff --git a/arch/ia64/include/asm/signal.h b/arch/ia64/include/asm/signal.h
new file mode 100644
index 000000000..80f067f9b
--- /dev/null
+++ b/arch/ia64/include/asm/signal.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Modified 1998-2001, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
+ */
+#ifndef _ASM_IA64_SIGNAL_H
+#define _ASM_IA64_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+# ifndef __ASSEMBLY__
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+# include <asm/sigcontext.h>
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGNAL_H */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
new file mode 100644
index 000000000..aa92234c0
--- /dev/null
+++ b/arch/ia64/include/asm/smp.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/irqreturn.h>
+
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+static inline unsigned int
+ia64_get_lid (void)
+{
+ union {
+ struct {
+ unsigned long reserved : 16;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ignored : 32;
+ } f;
+ unsigned long bits;
+ } lid;
+
+ lid.bits = ia64_getreg(_IA64_REG_CR_LID);
+ return lid.f.id << 8 | lid.f.eid;
+}
+
+#define hard_smp_processor_id() ia64_get_lid()
+
+#ifdef CONFIG_SMP
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+extern struct smp_boot_data {
+ int cpu_count;
+ int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
+
+extern char no_int_routing;
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+extern int smp_num_siblings;
+extern void __iomem *ipi_base_addr;
+
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i) ia64_cpu_to_sapicid[i]
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * Function to map hard smp processor id to logical id. Slow, so don't use this in
+ * performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; ++i)
+ if (cpu_physical_id(i) == cpuid)
+ break;
+ return i;
+}
+
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern void __init smp_build_cpu_map(void);
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+extern irqreturn_t handle_IPI(int irq, void *dev_id);
+extern void smp_send_reschedule (int cpu);
+extern void identify_siblings (struct cpuinfo_ia64 *);
+extern int is_multithreading_enabled(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#else /* CONFIG_SMP */
+
+#define cpu_logical_id(i) 0
+#define cpu_physical_id(i) ia64_get_lid()
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_SMP_H */
diff --git a/arch/ia64/include/asm/sn/intr.h b/arch/ia64/include/asm/sn/intr.h
new file mode 100644
index 000000000..3885a77b2
--- /dev/null
+++ b/arch/ia64/include/asm/sn/intr.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_XPC_NOTIFY 0xe7
+
+#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
new file mode 100644
index 000000000..d437aa433
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -0,0 +1,124 @@
+#ifndef _ASM_IA64_SN_SN_SAL_H
+#define _ASM_IA64_SN_SN_SAL_H
+
+/*
+ * System Abstraction Layer definitions for IA64
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sal.h>
+
+// SGI Specific Calls
+#define SN_SAL_GET_PARTITION_ADDR 0x02000009
+#define SN_SAL_MEMPROTECT 0x0200003e
+
+#define SN_SAL_WATCHLIST_ALLOC 0x02000070
+#define SN_SAL_WATCHLIST_FREE 0x02000071
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES 1
+#define SALRET_OK 0
+#define SALRET_NOT_IMPLEMENTED (-1)
+#define SALRET_INVALID_ARG (-2)
+#define SALRET_ERROR (-3)
+
+/*
+ * Returns the physical address of the partition's reserved page through
+ * an iterative number of calls.
+ *
+ * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
+ * set to the nasid of the partition whose reserved page's address is
+ * being sought.
+ * On subsequent calls, pass the values that were passed back on the
+ * previous call.
+ *
+ * While the return status equals SALRET_MORE_PASSES, keep calling
+ * this function after first copying 'len' bytes starting at 'addr'
+ * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
+ * be the physical address of the partition's reserved page. If the
+ * return status equals neither of these, an error has occurred.
+ */
+static inline s64
+sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+ struct ia64_sal_retval rv;
+ ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
+ *addr, buf, *len, 0, 0, 0);
+ *cookie = rv.v0;
+ *addr = rv.v1;
+ *len = rv.v2;
+ return rv.status;
+}
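+
+/*
+ * Illustrative calling loop (sketch only; buf addresses a buffer large
+ * enough for the bytes handed back on each pass, nasid is the target
+ * partition's nasid, and copy_from_physical() is a hypothetical helper).
+ * Once SALRET_OK is returned, addr holds the reserved page's physical
+ * address:
+ *
+ *	u64 cookie = 0, len = 0, addr = nasid;
+ *	s64 status;
+ *
+ *	do {
+ *		status = sn_partition_reserved_page_pa(buf, &cookie, &addr, &len);
+ *		if (status == SALRET_MORE_PASSES)
+ *			copy_from_physical(buf, addr, len);
+ *	} while (status == SALRET_MORE_PASSES);
+ */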
+
+/*
+ * Change memory access protections for a physical address range.
+ * nasid_array is not used on Altix, but may be in future architectures.
+ * Available memory protection access classes are defined after the function.
+ */
+static inline int
+sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
+ (u64)nasid_array, perms, 0, 0, 0);
+ return ret_stuff.status;
+}
+#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
+#define SN_MEMPROT_ACCESS_CLASS_1 0x2520c2
+#define SN_MEMPROT_ACCESS_CLASS_2 0x14a1ca
+#define SN_MEMPROT_ACCESS_CLASS_3 0x14a290
+#define SN_MEMPROT_ACCESS_CLASS_6 0x084080
+#define SN_MEMPROT_ACCESS_CLASS_7 0x021080
+
+union sn_watchlist_u {
+ u64 val;
+ struct {
+ u64 blade : 16,
+ size : 32,
+ filler : 16;
+ };
+};
+
+static inline int
+sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
+ unsigned long *intr_mmr_offset)
+{
+ struct ia64_sal_retval rv;
+ unsigned long addr;
+ union sn_watchlist_u size_blade;
+ int watchlist;
+
+ addr = (unsigned long)mq;
+ size_blade.size = mq_size;
+ size_blade.blade = blade;
+
+ /*
+	 * The BIOS returns the watchlist number or a negative error number.
+ */
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
+ size_blade.val, (u64)intr_mmr_offset,
+ (u64)&watchlist, 0, 0, 0);
+ if (rv.status < 0)
+ return rv.status;
+
+ return watchlist;
+}
+
+static inline int
+sn_mq_watchlist_free(int blade, int watchlist_num)
+{
+ struct ia64_sal_retval rv;
+ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
+ watchlist_num, 0, 0, 0, 0, 0);
+ return rv.status;
+}
+#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
new file mode 100644
index 000000000..dd8c166ff
--- /dev/null
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SPARSEMEM_H
+#define _ASM_IA64_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+
+#define SECTION_SIZE_BITS (30)
+#define MAX_PHYSMEM_BITS (50)
+#ifdef CONFIG_FORCE_MAX_ZONEORDER
+#if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#undef SECTION_SIZE_BITS
+#define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
+#endif
+#endif
+
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
+#endif /* _ASM_IA64_SPARSEMEM_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
new file mode 100644
index 000000000..5f620e663
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <linux/atomic.h>
+#include <asm/intrinsics.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+#define arch_spin_lock_init(x) ((x)->lock = 0)
+
+/*
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourselves to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
+ *
+ * 31 17 16 15 14 0
+ * +----------------------------------------------------+
+ * | now_serving | padding | next_ticket |
+ * +----------------------------------------------------+
+ */
+
+#define TICKET_SHIFT 17
+#define TICKET_BITS 15
+#define TICKET_MASK ((1 << TICKET_BITS) - 1)
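+
+/*
+ * Worked example (illustrative only): a lock word of 0x00020003 decodes to
+ * now_serving = (0x00020003 >> TICKET_SHIFT) & TICKET_MASK = 1 and
+ * next_ticket = 0x00020003 & TICKET_MASK = 3, i.e. ticket 1 currently holds
+ * the lock, ticket 2 is spinning, and the next arrival will take ticket 3.
+ */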
+
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock, ticket, serve;
+
+ ticket = ia64_fetchadd(1, p, acq);
+
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+ if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
+ cpu_relax();
+ }
+}
+
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+{
+ int tmp = READ_ONCE(lock->lock);
+
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+ return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
+ return 0;
+}
+
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+ WRITE_ONCE(*p, (tmp + 2) & ~1);
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+{
+ long tmp = READ_ONCE(lock->lock);
+
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
+}
+
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+{
+ long tmp = READ_ONCE(lock->lock);
+
+ return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return __ticket_spin_is_locked(lock);
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ return __ticket_spin_is_contended(lock);
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ __ticket_spin_lock(lock);
+}
+
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ __ticket_spin_unlock(lock);
+}
+
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ arch_spin_lock(lock);
+}
+#define arch_spin_lock_flags arch_spin_lock_flags
+
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__ (
+ "tbit.nz p6, p0 = %1,%2\n"
+ "br.few 3f\n"
+ "1:\n"
+ "fetchadd4.rel r2 = [%0], -1;;\n"
+ "(p6) ssm psr.i\n"
+ "2:\n"
+ "hint @pause\n"
+ "ld4 r2 = [%0];;\n"
+ "cmp4.lt p7,p0 = r2, r0\n"
+ "(p7) br.cond.spnt.few 2b\n"
+ "(p6) rsm psr.i\n"
+ ";;\n"
+ "3:\n"
+ "fetchadd4.acq r2 = [%0], 1;;\n"
+ "cmp4.lt p7,p0 = r2, r0\n"
+ "(p7) br.cond.spnt.few 1b\n"
+ : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+ : "p6", "p7", "r2", "memory");
+}
+
+#define arch_read_lock_flags arch_read_lock_flags
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+
+#define arch_read_lock(rw) \
+do { \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
+ \
+ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+ while (*(volatile int *)__read_lock_ptr < 0) \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#endif /* !ASM_SUPPORTED */
+
+#define arch_read_unlock(rw) \
+do { \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+} while (0)
+
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__ (
+ "tbit.nz p6, p0 = %1, %2\n"
+ "mov ar.ccv = r0\n"
+ "dep r29 = -1, r0, 31, 1\n"
+ "br.few 3f;;\n"
+ "1:\n"
+ "(p6) ssm psr.i\n"
+ "2:\n"
+ "hint @pause\n"
+ "ld4 r2 = [%0];;\n"
+ "cmp4.eq p0,p7 = r0, r2\n"
+ "(p7) br.cond.spnt.few 2b\n"
+ "(p6) rsm psr.i\n"
+ ";;\n"
+ "3:\n"
+ "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+ "cmp4.eq p0,p7 = r0, r2\n"
+ "(p7) br.cond.spnt.few 1b;;\n"
+ : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+ : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define arch_write_lock_flags arch_write_lock_flags
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
+
+#define arch_write_trylock(rw) \
+({ \
+ register long result; \
+ \
+ __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "dep r29 = -1, r0, 31, 1;;\n" \
+ "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
+ : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
+ (result == 0); \
+})
+
+static inline void arch_write_unlock(arch_rwlock_t *x)
+{
+ u8 *y = (u8 *)x;
+ barrier();
+ asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
+#else /* !ASM_SUPPORTED */
+
+#define arch_write_lock(l) \
+({ \
+ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
+ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
+ do { \
+ while (*ia64_write_lock_ptr) \
+ ia64_barrier(); \
+ ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
+ } while (ia64_val); \
+})
+
+#define arch_write_trylock(rw) \
+({ \
+ __u64 ia64_val; \
+ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
+ ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
+ (ia64_val == 0); \
+})
+
+static inline void arch_write_unlock(arch_rwlock_t *x)
+{
+ barrier();
+ x->write_lock = 0;
+}
+
+#endif /* !ASM_SUPPORTED */
+
+static inline int arch_read_trylock(arch_rwlock_t *x)
+{
+ union {
+ arch_rwlock_t lock;
+ __u32 word;
+ } old, new;
+ old.lock = new.lock = *x;
+ old.lock.write_lock = new.lock.write_lock = 0;
+ ++new.lock.read_counter;
+ return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
+}
+
+#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
new file mode 100644
index 000000000..6e345fefc
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int read_counter : 31;
+ volatile unsigned int write_lock : 1;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
+
+#endif
diff --git a/arch/ia64/include/asm/string.h b/arch/ia64/include/asm/string.h
new file mode 100644
index 000000000..8b84df0db
--- /dev/null
+++ b/arch/ia64/include/asm/string.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_STRING_H
+#define _ASM_IA64_STRING_H
+
+/*
+ * Here is where we want to put optimized versions of the string
+ * routines.
+ *
+ * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
+#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
+#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
+
+extern __kernel_size_t strlen (const char *);
+extern void *memcpy (void *, const void *, __kernel_size_t);
+extern void *memset (void *, int, __kernel_size_t);
+
+#endif /* _ASM_IA64_STRING_H */
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
new file mode 100644
index 000000000..a5a4e0946
--- /dev/null
+++ b/arch/ia64/include/asm/switch_to.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Low-level task switching. This is based on information published in
+ * the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_SWITCH_TO_H
+#define _ASM_IA64_SWITCH_TO_H
+
+#include <linux/percpu.h>
+
+struct task_struct;
+
+/*
+ * Context switch from one thread to another. If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#define IA64_HAS_EXTRA_STATE(t) \
+ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))
+
+#define __switch_to(prev,next,last) do { \
+ if (IA64_HAS_EXTRA_STATE(prev)) \
+ ia64_save_extra(prev); \
+ if (IA64_HAS_EXTRA_STATE(next)) \
+ ia64_load_extra(next); \
+ ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
+ (last) = ia64_switch_to((next)); \
+} while (0)
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU. In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do { \
+ if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+ ia64_psr(task_pt_regs(prev))->mfh = 0; \
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
+ __ia64_save_fpu((prev)->thread.fph); \
+ } \
+ __switch_to(prev, next, last); \
+ /* "next" in old context is "current" in new context */ \
+ if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
+ (task_cpu(current) != \
+ task_thread_info(current)->last_cpu))) { \
+ task_thread_info(current)->last_cpu = task_cpu(current); \
+ } \
+} while (0)
+#else
+# define switch_to(prev,next,last) __switch_to(prev, next, last)
+#endif
+
+#endif /* _ASM_IA64_SWITCH_TO_H */
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 000000000..0d23c0049
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Intel Corp. Shaohua Li <shaohua.li@intel.com>
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((long)regs->cr_ifs < 0) /* Not a syscall */
+ return -1;
+
+ return regs->r15;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* do nothing */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r10 == -1 ? -regs->r8:0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r8;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error) {
+ /* error < 0, but ia64 uses > 0 return value */
+ regs->r8 = -error;
+ regs->r10 = -1;
+ } else {
+ regs->r8 = val;
+ regs->r10 = 0;
+ }
+}
+
+extern void ia64_syscall_get_set_arguments(struct task_struct *task,
+ struct pt_regs *regs, unsigned long *args, int rw);
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ ia64_syscall_get_set_arguments(task, regs, args, 0);
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ ia64_syscall_get_set_arguments(task, regs, args, 1);
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_IA64;
+}
+#endif /* _ASM_SYSCALL_H */
diff --git a/arch/ia64/include/asm/termios.h b/arch/ia64/include/asm/termios.h
new file mode 100644
index 000000000..589c02644
--- /dev/null
+++ b/arch/ia64/include/asm/termios.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Modified 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28 Added N_IRDA and N_SMSBLOCK
+ */
+#ifndef _ASM_IA64_TERMIOS_H
+#define _ASM_IA64_TERMIOS_H
+
+#include <uapi/asm/termios.h>
+
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* _ASM_IA64_TERMIOS_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
new file mode 100644
index 000000000..51d20cb37
--- /dev/null
+++ b/arch/ia64/include/asm/thread_info.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_THREAD_INFO_H
+#define _ASM_IA64_THREAD_INFO_H
+
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+
+#ifndef __ASSEMBLY__
+
+/*
+ * On IA-64, we want to keep the task structure and kernel stack together, so they can be
+ * mapped by a single TLB entry and so they can be addressed by the "current" pointer
+ * without having to do pointer masking.
+ */
+struct thread_info {
+ struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
+ __u32 flags; /* thread_info flags (see TIF_*) */
+ __u32 cpu; /* current CPU */
+ __u32 last_cpu; /* Last CPU thread ran on */
+ __u32 status; /* Thread synchronous flags */
+ mm_segment_t addr_limit; /* user-level address space limit */
+	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ __u64 utime;
+ __u64 stime;
+ __u64 gtime;
+ __u64 hardirq_time;
+ __u64 softirq_time;
+ __u64 idle_time;
+ __u64 ac_stamp;
+ __u64 ac_leave;
+ __u64 ac_stime;
+ __u64 ac_utime;
+#endif
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .addr_limit = KERNEL_DS, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+#ifndef ASM_OFFSETS_C
+/* how to get the thread information struct from C */
+#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#else
+#define current_thread_info() ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
+#define task_thread_info(tsk) ((struct thread_info *) 0)
+#endif
+#define free_thread_stack(tsk) /* nothing */
+#define task_stack_page(tsk) ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->ac_stime = 0; \
+ task_thread_info(p)->ac_utime = 0; \
+ task_thread_info(p)->task = (p);
+#else
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->task = (p);
+#endif
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
+
+#define alloc_task_struct_node(node) \
+({ \
+ struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
+ KERNEL_STACK_SIZE_ORDER); \
+ struct task_struct *ret = page ? page_address(page) : NULL; \
+ \
+ ret; \
+})
+#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in least-significant 16 bits, other flags
+ * in top 16 bits
+ */
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
+#define TIF_SYSCALL_TRACE 2 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
+#define TIF_NOTIFY_SIGNAL	5	/* signal notifications exist */
+#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
+#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
+#define TIF_POLLING_NRFLAG 22 /* idle is polling for TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
+#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+
+/* "work to do on user-return" bits */
+#define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
+ _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_NOTIFY_SIGNAL)
+/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
+#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
+
+#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
new file mode 100644
index 000000000..7ccc077a6
--- /dev/null
+++ b/arch/ia64/include/asm/timex.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_TIMEX_H
+#define _ASM_IA64_TIMEX_H
+
+/*
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64.
+ * Also removed cacheflush_time as it's entirely unused.
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+typedef unsigned long cycles_t;
+
+extern void (*ia64_udelay)(unsigned long usecs);
+
+/*
+ * For performance reasons, we don't want to define CLOCK_TICK_RATE as
+ * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
+ * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time
+ * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
+ * If the calculation shows that your CLOCK_TICK_RATE cannot supply exactly 1/HZ ticks,
+ * the actual value is calculated and used to update the wall clock each jiffy. Setting
+ * the CLOCK_TICK_RATE to x*HZ ensures that the calculation will find no errors. Hence we
+ * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
+ * 100MHz.
+ */
+#define CLOCK_TICK_RATE (HZ * 100000UL)
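+
+/*
+ * For example, with HZ == 1000 this gives CLOCK_TICK_RATE == 100,000,000
+ * (the virtual 100MHz mentioned above), so each 1/HZ tick corresponds to
+ * exactly 100,000 clock units and the tick-length calculation is exact.
+ */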
+
+static inline cycles_t
+get_cycles (void)
+{
+ cycles_t ret;
+
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
+ return ret;
+}
+#define get_cycles get_cycles
+
+extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);
+
+#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
new file mode 100644
index 000000000..8d9da6f08
--- /dev/null
+++ b/arch/ia64/include/asm/tlb.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_TLB_H
+#define _ASM_IA64_TLB_H
+/*
+ * Based on <asm-generic/tlb.h>.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * Removing a translation from a page table (including TLB-shootdown) is a four-step
+ * procedure:
+ *
+ * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
+ * (this is a no-op on ia64).
+ * (2) Clear the relevant portions of the page-table
+ * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
+ * (4) Release the pages that were freed up in step (2).
+ *
+ * Note that the ordering of these steps is crucial to avoid races on MP machines.
+ *
+ * The Linux kernel defines several platform-specific hooks for TLB-shootdown. When
+ * unmapping a portion of the virtual address space, these hooks are called according to
+ * the following template:
+ *
+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
+ * {
+ * for each vma that needs a shootdown do {
+ * tlb_start_vma(tlb, vma);
+ * for each page-table-entry PTE that needs to be removed do {
+ * tlb_remove_tlb_entry(tlb, pte, address);
+ * if (pte refers to a normal page) {
+ * tlb_remove_page(tlb, page);
+ * }
+ * }
+ * tlb_end_vma(tlb, vma);
+ * }
+ * }
+ * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
+ */
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
new file mode 100644
index 000000000..ceac10c4d
--- /dev/null
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_TLBFLUSH_H
+#define _ASM_IA64_TLBFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+};	/* Record for tr entry! */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ region register macros
+*/
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
+/*
+ * Now for some TLB flushing routines. This is the kind of stuff that
+ * can be very expensive, so try to avoid them whenever possible.
+ */
+extern void setup_ptcg_sem(int max_purges, int from_palo);
+
+/*
+ * Flush everything (kernel mapping may also have changed due to
+ * vmalloc/vfree).
+ */
+extern void local_flush_tlb_all (void);
+
+#ifdef CONFIG_SMP
+ extern void smp_flush_tlb_all (void);
+ extern void smp_flush_tlb_mm (struct mm_struct *mm);
+ extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
+# define flush_tlb_all() smp_flush_tlb_all()
+#else
+# define flush_tlb_all() local_flush_tlb_all()
+# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
+#endif
+
+static inline void
+local_finish_flush_tlb_mm (struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ activate_context(mm);
+}
+
+/*
+ * Flush a specified user mapping. This is called, e.g., as a result of fork() and
+ * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect
+ * the PTEs of the parent task.
+ */
+static inline void
+flush_tlb_mm (struct mm_struct *mm)
+{
+ if (!mm)
+ return;
+
+ set_bit(mm->context, ia64_ctx.flushmap);
+ mm->context = 0;
+
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+#ifdef CONFIG_SMP
+ smp_flush_tlb_mm(mm);
+#else
+ local_finish_flush_tlb_mm(mm);
+#endif
+}
+
+extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
+/*
+ * Page-granular tlb flush.
+ */
+static inline void
+flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
+{
+#ifdef CONFIG_SMP
+ flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+ if (vma->vm_mm == current->active_mm)
+ ia64_ptcl(addr, (PAGE_SHIFT << 2));
+ else
+ vma->vm_mm->context = 0;
+#endif
+}
+
+/*
+ * Flush the local TLB. Invoked from another cpu using an IPI.
+ */
+#ifdef CONFIG_SMP
+void smp_local_flush_tlb(void);
+#else
+#define smp_local_flush_tlb()
+#endif
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all(); /* XXX fix me */
+}
+
+#endif /* _ASM_IA64_TLBFLUSH_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
new file mode 100644
index 000000000..43567240b
--- /dev/null
+++ b/arch/ia64/include/asm/topology.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2002, Erich Focht, NEC
+ *
+ * All rights reserved.
+ */
+#ifndef _ASM_IA64_TOPOLOGY_H
+#define _ASM_IA64_TOPOLOGY_H
+
+#include <asm/acpi.h>
+#include <asm/numa.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_NUMA
+
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
+
+/*
+ * Nodes within this distance are eligible for reclaim by zone_reclaim() when
+ * zone_reclaim_mode is enabled.
+ */
+#define RECLAIM_DISTANCE 15
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ &node_to_cpu_mask[node])
+
+/*
+ * Determines the node for a given pci bus
+ */
+#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
+
+void build_cpu_to_node_map(void);
+
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
+#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#endif
+
+extern void arch_fix_phys_package_id(int num, u32 slot);
+
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
new file mode 100644
index 000000000..5ddc7703d
--- /dev/null
+++ b/arch/ia64/include/asm/types.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid naming clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
+
+#ifdef __ASSEMBLY__
+#else
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+
+struct fnptr {
+ unsigned long ip;
+ unsigned long gp;
+};
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_TYPES_H */
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
new file mode 100644
index 000000000..179243c3d
--- /dev/null
+++ b/arch/ia64/include/asm/uaccess.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary. This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses. Thus, we need to be careful not to let the user
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible. This code is also fairly performance sensitive,
+ * so we want to spend as little time doing safety checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros are sometimes also
+ * called from within the kernel itself, in which case the address
+ * validity check must be skipped. The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it. This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault. When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
+ * then resumes execution at the continuation point.
+ *
+ * Based on <asm-alpha/uaccess.h>.
+ *
+ * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/page-flags.h>
+
+#include <asm/intrinsics.h>
+#include <linux/pgtable.h>
+#include <asm/io.h>
+#include <asm/extable.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
+#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
+
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area really is in
+ * user-level space. In order to do this efficiently, we make sure that the page at
+ * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+ unsigned long addr = (unsigned long)p;
+ unsigned long seg = get_fs().seg;
+ return likely(addr <= seg) &&
+ (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+}
+#define access_ok(addr, size) __access_ok((addr), (size))
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Be careful not to:
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
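+
+/*
+ * Illustrative sketch (not part of the original patch): a typical caller
+ * of the checking variants above.  The function name and the __user
+ * pointer are hypothetical; the pattern -- call get_user()/put_user()
+ * and propagate the -EFAULT they return -- is how these macros are
+ * meant to be used.
+ */
+#if 0	/* example only, never compiled */
+static long example_double_word(int __user *uptr)
+{
+	int val;
+
+	if (get_user(val, uptr))	/* access_ok() check + ld4 with exception fixup */
+		return -EFAULT;
+	val *= 2;
+	if (put_user(val, uptr))	/* access_ok() check + st4 with exception fixup */
+		return -EFAULT;
+	return 0;
+}
+#endif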
+
+#ifdef ASM_SUPPORTED
+ struct __large_struct { unsigned long buf[100]; };
+# define __m(x) (*(struct __large_struct __user *)(x))
+
+/* We need to declare the __ex_table section before we can use it in .xdata. */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+# define __get_user_size(val, addr, n, err) \
+do { \
+ register long __gu_r8 asm ("r8") = 0; \
+ register long __gu_r9 asm ("r9"); \
+ asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
+ "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
+ "[1:]" \
+ : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
+ (err) = __gu_r8; \
+ (val) = __gu_r9; \
+} while (0)
+
+/*
+ * The "__put_user_size()" macro tells gcc that it reads from memory instead of writing to
+ * it. This is safe because the store does not touch any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+# define __put_user_size(val, addr, n, err) \
+do { \
+ register long __pu_r8 asm ("r8") = 0; \
+ asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
+ "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
+ "[1:]" \
+ : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
+ (err) = __pu_r8; \
+} while (0)
+
+#else /* !ASM_SUPPORTED */
+# define RELOC_TYPE 2 /* ip-rel */
+# define __get_user_size(val, addr, n, err) \
+do { \
+ __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
+ (err) = ia64_getreg(_IA64_REG_R8); \
+ (val) = ia64_getreg(_IA64_REG_R9); \
+} while (0)
+# define __put_user_size(val, addr, n, err) \
+do { \
+ __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \
+ (__force unsigned long) (val)); \
+ (err) = ia64_getreg(_IA64_REG_R8); \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+extern void __get_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
+ * using r8/r9.
+ */
+#define __do_get_user(check, x, ptr, size) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__ (size) __gu_size = (size); \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ if (!check || __access_ok(__gu_ptr, size)) \
+ switch (__gu_size) { \
+ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
+ case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
+ case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
+ case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
+ default: __get_user_unknown(); break; \
+ } \
+ (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size)
+#define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size)
+
+extern void __put_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
+ */
+#define __do_put_user(check, x, ptr, size) \
+({ \
+ __typeof__ (x) __pu_x = (x); \
+ __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__ (size) __pu_size = (size); \
+ long __pu_err = -EFAULT; \
+ \
+ if (!check || __access_ok(__pu_ptr, __pu_size)) \
+ switch (__pu_size) { \
+ case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
+ case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
+ case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
+ case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
+ default: __put_user_unknown(); break; \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size)
+#define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size)
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
+ unsigned long count);
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long count)
+{
+ return __copy_user(to, (__force void __user *) from, count);
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long count)
+{
+ return __copy_user((__force void __user *) to, from, count);
+}
+
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
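+
+/*
+ * Illustrative sketch (not part of the original patch): the generic
+ * copy_{to,from}_user() wrappers in <linux/uaccess.h> are built on the
+ * raw routines above.  The names below are hypothetical; the point is
+ * that the return value is the number of bytes that could NOT be
+ * copied, so 0 means success.
+ */
+#if 0	/* example only, never compiled */
+static long example_fetch_args(void *kdst, const void __user *usrc, unsigned long len)
+{
+	unsigned long left = copy_from_user(kdst, usrc, len);
+
+	return left ? -EFAULT : 0;	/* 'left' bytes were left uncopied */
+}
+#endif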
+
+extern unsigned long __do_clear_user (void __user *, unsigned long);
+
+#define __clear_user(to, n) __do_clear_user(to, n)
+
+#define clear_user(to, n) \
+({ \
+ unsigned long __cu_len = (n); \
+ if (__access_ok(to, __cu_len)) \
+ __cu_len = __do_clear_user(to, __cu_len); \
+ __cu_len; \
+})
+
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
+ * strlen.
+ */
+extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
+
+#define strncpy_from_user(to, from, n) \
+({ \
+ const char __user * __sfu_from = (from); \
+ long __sfu_ret = -EFAULT; \
+ if (__access_ok(__sfu_from, 0)) \
+ __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
+ __sfu_ret; \
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char __user *, long);
+
+#define strnlen_user(str, len) \
+({ \
+ const char __user *__su_str = (str); \
+ unsigned long __su_ret = 0; \
+ if (__access_ok(__su_str, 0)) \
+ __su_ret = __strnlen_user(__su_str, len); \
+ __su_ret; \
+})
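+
+/*
+ * Illustrative sketch (not part of the original patch): consuming the
+ * return values documented above.  'ubuf', 'kname' and 'size' are
+ * hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static long example_get_name(char *kname, const char __user *ubuf, long size)
+{
+	long len = strncpy_from_user(kname, ubuf, size);
+
+	if (len < 0)
+		return len;		/* -EFAULT: faulted before the terminator */
+	if (len == size)
+		return -ENAMETOOLONG;	/* buffer filled without seeing a NUL */
+	return 0;			/* len == strlen(kname) */
+}
+#endif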
+
+#define ARCH_HAS_TRANSLATE_MEM_PTR 1
+static __inline__ void *
+xlate_dev_mem_ptr(phys_addr_t p)
+{
+ struct page *page;
+ void *ptr;
+
+ page = pfn_to_page(p >> PAGE_SHIFT);
+ if (PageUncached(page))
+ ptr = (void *)p + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = __va(p);
+
+ return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ void *
+xlate_dev_kmem_ptr(void *p)
+{
+ struct page *page;
+ void *ptr;
+
+ page = virt_to_page((unsigned long)p);
+ if (PageUncached(page))
+ ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = p;
+
+ return ptr;
+}
+
+#endif /* _ASM_IA64_UACCESS_H */
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
new file mode 100644
index 000000000..328942e3c
--- /dev/null
+++ b/arch/ia64/include/asm/unaligned.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_UNALIGNED_H
+#define _ASM_IA64_UNALIGNED_H
+
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/ia64/include/asm/uncached.h b/arch/ia64/include/asm/uncached.h
new file mode 100644
index 000000000..98f447fc7
--- /dev/null
+++ b/arch/ia64/include/asm/uncached.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Prototypes for the uncached page allocator
+ */
+
+extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
+extern void uncached_free_page(unsigned long uc_addr, int n_pages);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
new file mode 100644
index 000000000..9ba6110b1
--- /dev/null
+++ b/arch/ia64/include/asm/unistd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_UNISTD_H
+#define _ASM_IA64_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls __NR_syscalls /* length of syscall table */
+
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_UTIME
+
+#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
+
+asmlinkage unsigned long sys_mmap(
+ unsigned long addr, unsigned long len,
+ int prot, int flags,
+ int fd, long off);
+asmlinkage unsigned long sys_mmap2(
+ unsigned long addr, unsigned long len,
+ int prot, int flags,
+ int fd, long pgoff);
+struct pt_regs;
+asmlinkage long sys_ia64_pipe(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/include/asm/unwind.h b/arch/ia64/include/asm/unwind.h
new file mode 100644
index 000000000..c5bd4b3e3
--- /dev/null
+++ b/arch/ia64/include/asm/unwind.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_UNWIND_H
+#define _ASM_IA64_UNWIND_H
+
+/*
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full IA-64 unwind API (though
+ * it would of course be possible to implement the kernel API on top
+ * of it).
+ */
+
+struct task_struct; /* forward declaration */
+struct switch_stack; /* forward declaration */
+
+enum unw_application_register {
+ UNW_AR_BSP,
+ UNW_AR_BSPSTORE,
+ UNW_AR_PFS,
+ UNW_AR_RNAT,
+ UNW_AR_UNAT,
+ UNW_AR_LC,
+ UNW_AR_EC,
+ UNW_AR_FPSR,
+ UNW_AR_RSC,
+ UNW_AR_CCV,
+ UNW_AR_CSD,
+ UNW_AR_SSD
+};
+
+/*
+ * The following declarations are private to the unwind
+ * implementation:
+ */
+
+struct unw_stack {
+ unsigned long limit;
+ unsigned long top;
+};
+
+#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0)
+
+/*
+ * No user of this module should ever access this structure directly
+ * as it is subject to change. It is declared here solely so we can
+ * use automatic variables.
+ */
+struct unw_frame_info {
+ struct unw_stack regstk;
+ struct unw_stack memstk;
+ unsigned int flags;
+ short hint;
+ short prev_script;
+
+ /* current frame info: */
+ unsigned long bsp; /* backing store pointer value */
+ unsigned long sp; /* stack pointer value */
+ unsigned long psp; /* previous sp value */
+ unsigned long ip; /* instruction pointer value */
+ unsigned long pr; /* current predicate values */
+ unsigned long *cfm_loc; /* cfm save location (or NULL) */
+ unsigned long pt; /* struct pt_regs location */
+
+ struct task_struct *task;
+ struct switch_stack *sw;
+
+ /* preserved state: */
+ unsigned long *bsp_loc; /* previous bsp save location */
+ unsigned long *bspstore_loc;
+ unsigned long *pfs_loc;
+ unsigned long *rnat_loc;
+ unsigned long *rp_loc;
+ unsigned long *pri_unat_loc;
+ unsigned long *unat_loc;
+ unsigned long *pr_loc;
+ unsigned long *lc_loc;
+ unsigned long *fpsr_loc;
+ struct unw_ireg {
+ unsigned long *loc;
+ struct unw_ireg_nat {
+ unsigned long type : 3; /* enum unw_nat_type */
+ signed long off : 61; /* NaT word is at loc+nat.off */
+ } nat;
+ } r4, r5, r6, r7;
+ unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
+ struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
+};
+
+/*
+ * The official API follows below:
+ */
+
+struct unw_table_entry {
+ u64 start_offset;
+ u64 end_offset;
+ u64 info_offset;
+};
+
+/*
+ * Initialize unwind support.
+ */
+extern void unw_init (void);
+
+extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+ const void *table_start, const void *table_end);
+
+extern void unw_remove_unwind_table (void *handle);
+
+/*
+ * Prepare to unwind blocked task t.
+ */
+extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
+
+extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+ struct switch_stack *sw);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error.
+ */
+extern int unw_unwind (struct unw_frame_info *info);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unw_unwind_to_user (struct unw_frame_info *info);
+
+#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
+
+static inline int
+unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->ip;
+ return 0;
+}
+
+static inline int
+unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->sp;
+ return 0;
+}
+
+static inline int
+unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->psp;
+ return 0;
+}
+
+static inline int
+unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->bsp;
+ return 0;
+}
+
+static inline int
+unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = *(info)->cfm_loc;
+ return 0;
+}
+
+static inline int
+unw_set_cfm (struct unw_frame_info *info, unsigned long val)
+{
+ *(info)->cfm_loc = val;
+ return 0;
+}
+
+static inline int
+unw_get_rp (struct unw_frame_info *info, unsigned long *val)
+{
+ if (!info->rp_loc)
+ return -1;
+ *val = *info->rp_loc;
+ return 0;
+}
+
+extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
+extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
+extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
+
+static inline int
+unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
+{
+ return unw_access_gr(i, n, &v, &nat, 1);
+}
+
+static inline int
+unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
+{
+ return unw_access_br(i, n, &v, 1);
+}
+
+static inline int
+unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
+{
+ return unw_access_fr(i, n, &v, 1);
+}
+
+static inline int
+unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
+{
+ return unw_access_ar(i, n, &v, 1);
+}
+
+static inline int
+unw_set_pr (struct unw_frame_info *i, unsigned long v)
+{
+ return unw_access_pr(i, &v, 1);
+}
+
+#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0)
+#define unw_get_br(i,n,v) unw_access_br(i,n,v,0)
+#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0)
+#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0)
+#define unw_get_pr(i,v) unw_access_pr(i,v,0)
+
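+/*
+ * Illustrative sketch (not part of the original patch): walking the
+ * current kernel stack with the API above.  The print format is
+ * hypothetical; unw_init_running() builds the initial frame for the
+ * running thread and unw_unwind() steps towards the callers until it
+ * returns a negative value.
+ */
+#if 0	/* example only, never compiled */
+static void example_dump_frames(struct unw_frame_info *info, void *arg)
+{
+	unsigned long ip;
+
+	do {
+		unw_get_ip(info, &ip);
+		if (ip == 0)
+			break;
+		printk("  ip=%016lx\n", ip);
+	} while (unw_unwind(info) >= 0);
+}
+
+static void example_show_stack(void)
+{
+	unw_init_running(example_dump_frames, NULL);
+}
+#endif
+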
+#endif /* _ASM_IA64_UNWIND_H */
diff --git a/arch/ia64/include/asm/user.h b/arch/ia64/include/asm/user.h
new file mode 100644
index 000000000..0ba486651
--- /dev/null
+++ b/arch/ia64/include/asm/user.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_USER_H
+#define _ASM_IA64_USER_H
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd). The file contents are as
+ * follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused; we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ *
+ * Modified 1998, 1999, 2001
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+
+#define EF_SIZE 3072 /* XXX fix me */
+
+struct user {
+ unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_IA64_USER_H */
diff --git a/arch/ia64/include/asm/ustack.h b/arch/ia64/include/asm/ustack.h
new file mode 100644
index 000000000..112d40a0f
--- /dev/null
+++ b/arch/ia64/include/asm/ustack.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_USTACK_H
+#define _ASM_IA64_USTACK_H
+
+#include <asm/page.h>
+#include <uapi/asm/ustack.h>
+
+/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
+#define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+#define STACK_TOP_MAX STACK_TOP
+#endif /* _ASM_IA64_USTACK_H */
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644
index 000000000..48d4526bf
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#ifdef CONFIG_IA64_SGI_UV
+extern bool ia64_is_uv;
+
+static inline int is_uv_system(void)
+{
+ return ia64_is_uv;
+}
+
+void __init uv_probe_system_type(void);
+void __init uv_setup(char **cmdline_p);
+#else /* CONFIG_IA64_SGI_UV */
+static inline int is_uv_system(void)
+{
+ return false;
+}
+
+static inline void __init uv_probe_system_type(void)
+{
+}
+
+static inline void __init uv_setup(char **cmdline_p)
+{
+}
+#endif /* CONFIG_IA64_SGI_UV */
+
+#endif /* _ASM_IA64_UV_UV_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
new file mode 100644
index 000000000..2a88c7204
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -0,0 +1,315 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV architectural definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_UV_HUB_H__
+#define __ASM_IA64_UV_HUB_H__
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+
+
+/*
+ * Addressing Terminology
+ *
+ * M - The low M bits of a physical address represent the offset
+ * into the blade-local memory. RAM on a blade is physically
+ * contiguous (although various IO spaces may punch holes in
+ * it).
+ *
+ * N - Number of bits in the node portion of a socket physical
+ * address.
+ *
+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * routers always have the low bit set to 1; C/M bricks have the low bit
+ * equal to 0. Most addressing macros that target UV hub chips
+ * right shift the NASID by 1 to exclude the always-zero bit.
+ * NASIDs contain up to 15 bits.
+ *
+ * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ * of nasids.
+ *
+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
+ * of the nasid for socket usage.
+ *
+ *
+ * NumaLink Global Physical Address Format:
+ * +--------------------------------+---------------------+
+ * |00..000| GNODE | NodeOffset |
+ * +--------------------------------+---------------------+
+ * |<-------53 - M bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ *
+ *
+ * Memory/UV-HUB Processor Socket Address Format:
+ * +----------------+---------------+---------------------+
+ * |00..000000000000| PNODE | NodeOffset |
+ * +----------------+---------------+---------------------+
+ * <--- N bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ * N - number of PNODE bits (0 .. 10)
+ *
+ * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
+ * The actual values are configuration dependent and are set at
+ * boot time. M & N values are set by the hardware/BIOS at boot.
+ */
+
+
+/*
+ * Maximum number of bricks in all partitions and in all coherency domains.
+ * This is the total number of bricks accessible in the numalink fabric. It
+ * includes all C & M bricks. Routers are NOT included.
+ *
+ * This value is also the value of the maximum number of non-router NASIDs
+ * in the numalink fabric.
+ *
+ * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
+ */
+#define UV_MAX_NUMALINK_BLADES 16384
+
+/*
+ * Maximum number of C/Mbricks within a software SSI (hardware may support
+ * more).
+ */
+#define UV_MAX_SSI_BLADES 1
+
+/*
+ * The largest possible NASID of a C or M brick (+ 2)
+ */
+#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_NODES * 2)
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced and are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct uv_hub_info_s {
+ unsigned long global_mmr_base;
+ unsigned long gpa_mask;
+ unsigned long gnode_upper;
+ unsigned long lowmem_remap_top;
+ unsigned long lowmem_remap_base;
+ unsigned short pnode;
+ unsigned short pnode_mask;
+ unsigned short coherency_domain_number;
+ unsigned short numa_blade_id;
+ unsigned char blade_processor_id;
+ unsigned char m_val;
+ unsigned char n_val;
+};
+DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
+#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+
+/*
+ * Local & Global MMR space macros.
+ * Note: macros are intended to be used ONLY by inline functions
+ * in this file - not by other kernel code.
+ * n - NASID (full 15-bit global nasid)
+ * g - GNODE (full 15-bit global nasid, right shifted 1)
+ * p - PNODE (local part of nasids, right shifted 1)
+ */
+#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
+
+#define UV_LOCAL_MMR_BASE 0xf4000000UL
+#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
+
+#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
+#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
+
+#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
+
+#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
+ ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+/*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+ * Note: use the standard __pa() & __va() macros for converting
+ * between socket virtual and socket physical addresses.
+ */
+
+/* socket phys RAM --> UV global physical address */
+static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
+{
+ if (paddr < uv_hub_info->lowmem_remap_top)
+ paddr += uv_hub_info->lowmem_remap_base;
+ return paddr | uv_hub_info->gnode_upper;
+}
+
+
+/* socket virtual --> UV global physical address */
+static inline unsigned long uv_gpa(void *v)
+{
+ return __pa(v) | uv_hub_info->gnode_upper;
+}
+
+/* socket virtual --> UV global physical address */
+static inline void *uv_vgpa(void *v)
+{
+ return (void *)uv_gpa(v);
+}
+
+/* UV global physical address --> socket virtual */
+static inline void *uv_va(unsigned long gpa)
+{
+ return __va(gpa & uv_hub_info->gpa_mask);
+}
+
+/* pnode, offset --> socket virtual */
+static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
+{
+ return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+}
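+
+/*
+ * Illustrative sketch (not part of the original patch): the two
+ * directions of the conversions above.  'buf' is a hypothetical
+ * blade-local kernel buffer; whether the round trip yields the same
+ * pointer depends on the configured gpa_mask/gnode_upper, so only the
+ * calling pattern is shown.
+ */
+#if 0	/* example only, never compiled */
+static void example_gpa_conversion(void *buf)
+{
+	unsigned long gpa = uv_gpa(buf);	/* socket virtual -> UV global physical */
+	void *va = uv_va(gpa);			/* UV global physical -> socket virtual */
+
+	(void) va;
+}
+#endif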
+
+
+/*
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+static inline unsigned long *uv_global_mmr32_address(int pnode,
+ unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR32_BASE |
+ UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+ unsigned long val)
+{
+ *uv_global_mmr32_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr32(int pnode,
+ unsigned long offset)
+{
+ return *uv_global_mmr32_address(pnode, offset);
+}
+
+/*
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+static inline unsigned long *uv_global_mmr64_address(int pnode,
+ unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR64_BASE |
+ UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+ unsigned long val)
+{
+ *uv_global_mmr64_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr64(int pnode,
+ unsigned long offset)
+{
+ return *uv_global_mmr64_address(pnode, offset);
+}
+
+/*
+ * Access hub local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+ return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+ return *uv_local_mmr_address(offset);
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+ *uv_local_mmr_address(offset) = val;
+}
+
+/*
+ * Structures and definitions for converting between cpu, node, pnode, and blade
+ * numbers.
+ */
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+ return smp_processor_id();
+}
+
+/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
+static inline int uv_numa_blade_id(void)
+{
+ return 0;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+ return 0;
+}
+
+/* Convert linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+ return 0;
+}
+
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
+{
+ return 0;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+ return num_possible_cpus();
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+ return num_online_cpus();
+}
+
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
+{
+ return 0;
+}
+
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
+{
+ return 0;
+}
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+ return 1;
+}
+
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+ /* not currently needed on ia64 */
+}
+
+
+#endif /* __ASM_IA64_UV_HUB_H__ */
+
diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
new file mode 100644
index 000000000..fe0b8f05e
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -0,0 +1,825 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV MMR definitions
+ *
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H
+
+#define UV_MMR_ENABLE (1UL << 63)
+
+/* ========================================================================= */
+/* UVH_BAU_DATA_CONFIG */
+/* ========================================================================= */
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x0438
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_bau_data_config_u {
+ unsigned long v;
+ struct uvh_bau_data_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x005e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+union uvh_event_occurred0_u {
+ unsigned long v;
+ struct uvh_event_occurred0_s {
+ unsigned long lb_hcerr : 1; /* RW, W1C */
+ unsigned long gr0_hcerr : 1; /* RW, W1C */
+ unsigned long gr1_hcerr : 1; /* RW, W1C */
+ unsigned long lh_hcerr : 1; /* RW, W1C */
+ unsigned long rh_hcerr : 1; /* RW, W1C */
+ unsigned long xn_hcerr : 1; /* RW, W1C */
+ unsigned long si_hcerr : 1; /* RW, W1C */
+ unsigned long lb_aoerr0 : 1; /* RW, W1C */
+ unsigned long gr0_aoerr0 : 1; /* RW, W1C */
+ unsigned long gr1_aoerr0 : 1; /* RW, W1C */
+ unsigned long lh_aoerr0 : 1; /* RW, W1C */
+ unsigned long rh_aoerr0 : 1; /* RW, W1C */
+ unsigned long xn_aoerr0 : 1; /* RW, W1C */
+ unsigned long si_aoerr0 : 1; /* RW, W1C */
+ unsigned long lb_aoerr1 : 1; /* RW, W1C */
+ unsigned long gr0_aoerr1 : 1; /* RW, W1C */
+ unsigned long gr1_aoerr1 : 1; /* RW, W1C */
+ unsigned long lh_aoerr1 : 1; /* RW, W1C */
+ unsigned long rh_aoerr1 : 1; /* RW, W1C */
+ unsigned long xn_aoerr1 : 1; /* RW, W1C */
+ unsigned long si_aoerr1 : 1; /* RW, W1C */
+ unsigned long rh_vpi_int : 1; /* RW, W1C */
+ unsigned long system_shutdown_int : 1; /* RW, W1C */
+ unsigned long lb_irq_int_0 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_1 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_2 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_3 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_4 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_5 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_6 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_7 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_8 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_9 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_10 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_11 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_12 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_13 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_14 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_15 : 1; /* RW, W1C */
+ unsigned long l1_nmi_int : 1; /* RW, W1C */
+ unsigned long stop_clock : 1; /* RW, W1C */
+ unsigned long asic_to_l1 : 1; /* RW, W1C */
+ unsigned long l1_to_asic : 1; /* RW, W1C */
+ unsigned long ltc_int : 1; /* RW, W1C */
+ unsigned long la_seq_trigger : 1; /* RW, W1C */
+ unsigned long ipi_int : 1; /* RW, W1C */
+ unsigned long extio_int0 : 1; /* RW, W1C */
+ unsigned long extio_int1 : 1; /* RW, W1C */
+ unsigned long extio_int2 : 1; /* RW, W1C */
+ unsigned long extio_int3 : 1; /* RW, W1C */
+ unsigned long profile_int : 1; /* RW, W1C */
+ unsigned long rtc0 : 1; /* RW, W1C */
+ unsigned long rtc1 : 1; /* RW, W1C */
+ unsigned long rtc2 : 1; /* RW, W1C */
+ unsigned long rtc3 : 1; /* RW, W1C */
+ unsigned long bau_data : 1; /* RW, W1C */
+ unsigned long power_management_req : 1; /* RW, W1C */
+ unsigned long rsvd_57_63 : 7; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int0_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int1_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int0_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int1_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPB */
+/* ========================================================================= */
+#define UVH_INT_CMPB 0x22080UL
+
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpb_u {
+ unsigned long v;
+ struct uvh_int_cmpb_s {
+ unsigned long real_time_cmpb : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPC */
+/* ========================================================================= */
+#define UVH_INT_CMPC 0x22100UL
+
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpc_u {
+ unsigned long v;
+ struct uvh_int_cmpc_s {
+ unsigned long real_time_cmpc : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPD */
+/* ========================================================================= */
+#define UVH_INT_CMPD 0x22180UL
+
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpd_u {
+ unsigned long v;
+ struct uvh_int_cmpd_s {
+ unsigned long real_time_cmpd : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_NODE_ID */
+/* ========================================================================= */
+#define UVH_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UVH_NODE_ID_NI_PORT_SHFT 56
+#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+union uvh_node_id_u {
+ unsigned long v;
+ struct uvh_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47 : 1; /* */
+ unsigned long nodes_per_bit : 7; /* RW */
+ unsigned long rsvd_55 : 1; /* */
+ unsigned long ni_port : 4; /* RO */
+ unsigned long rsvd_60_63 : 4; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_gru_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_47: 2; /* */
+ unsigned long gr4 : 1; /* RW */
+ unsigned long rsvd_49_51: 3; /* */
+ unsigned long n_gru : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
+ unsigned long dual_hub : 1; /* RW */
+ unsigned long rsvd_47_62: 16; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC */
+/* ========================================================================= */
+#define UVH_RTC 0x340000UL
+
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+
+union uvh_rtc_u {
+ unsigned long v;
+ struct uvh_rtc_s {
+ unsigned long real_time_clock : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
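+
+/*
+ * Illustrative sketch (not part of the original patch): the usual pattern
+ * for these definitions -- read the raw 64-bit MMR (here with
+ * uv_read_local_mmr() from <asm/uv/uv_hub.h>, an assumption for the
+ * example) and decode it through the matching union.
+ */
+#if 0	/* example only, never compiled */
+static unsigned long example_read_rtc(void)
+{
+	union uvh_rtc_u rtc;
+
+	rtc.v = uv_read_local_mmr(UVH_RTC);
+	return rtc.s.real_time_clock;	/* low 56 bits hold the counter */
+}
+#endif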
+
+/* ========================================================================= */
+/* UVH_RTC1_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc1_int_config_u {
+ unsigned long v;
+ struct uvh_rtc1_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC2_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC2_INT_CONFIG 0x61600UL
+
+#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC2_INT_CONFIG_P_SHFT 13
+#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC2_INT_CONFIG_T_SHFT 15
+#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC2_INT_CONFIG_M_SHFT 16
+#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc2_int_config_u {
+ unsigned long v;
+ struct uvh_rtc2_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC3_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC3_INT_CONFIG 0x61640UL
+
+#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC3_INT_CONFIG_P_SHFT 13
+#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC3_INT_CONFIG_T_SHFT 15
+#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC3_INT_CONFIG_M_SHFT 16
+#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc3_int_config_u {
+ unsigned long v;
+ struct uvh_rtc3_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC_INC_RATIO */
+/* ========================================================================= */
+#define UVH_RTC_INC_RATIO 0x350000UL
+
+#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
+#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
+#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
+#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
+
+union uvh_rtc_inc_ratio_u {
+ unsigned long v;
+ struct uvh_rtc_inc_ratio_s {
+ unsigned long fraction : 20; /* RW */
+ unsigned long ratio : 3; /* RW */
+ unsigned long rsvd_23_63: 41; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
+
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
+
+union uvh_si_addr_map_config_u {
+ unsigned long v;
+ struct uvh_si_addr_map_config_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long rsvd_6_7: 2; /* */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_12_63: 52; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
+
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias0_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias0_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
+
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias1_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias1_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
+
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias2_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias2_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
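Editor's sketch (not part of the patch): every MMR in this header is described twice, once as SHFT/MASK constants and once as a bit-field union, and the two views decode a raw register image identically. A minimal illustration using the UVH_RTC definitions above; the value of `raw` is made up, and on real hardware it would come from an MMR read helper.

	unsigned long raw = 0x0123456789abcdUL;	/* hypothetical register image */
	union uvh_rtc_u rtc;

	rtc.v = raw;
	/* both extractions yield the same 56-bit clock value */
	unsigned long ticks_a = rtc.s.real_time_clock;
	unsigned long ticks_b = (raw & UVH_RTC_REAL_TIME_CLOCK_MASK)
					>> UVH_RTC_REAL_TIME_CLOCK_SHFT;
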
diff --git a/arch/ia64/include/asm/vermagic.h b/arch/ia64/include/asm/vermagic.h
new file mode 100644
index 000000000..29c7424f4
--- /dev/null
+++ b/arch/ia64/include/asm/vermagic.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+
+#define MODULE_ARCH_VERMAGIC "ia64" \
+ "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/ia64/include/asm/vga.h b/arch/ia64/include/asm/vga.h
new file mode 100644
index 000000000..64ce0b971
--- /dev/null
+++ b/arch/ia64/include/asm/vga.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * (c) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef __ASM_IA64_VGA_H_
+#define __ASM_IA64_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then access the
+ * videoram directly without any black magic.
+ */
+
+extern unsigned long vga_console_iobase;
+extern unsigned long vga_console_membase;
+
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(vga_console_membase + (x), s))
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif /* __ASM_IA64_VGA_H_ */
diff --git a/arch/ia64/include/asm/vmalloc.h b/arch/ia64/include/asm/vmalloc.h
new file mode 100644
index 000000000..a2b51141a
--- /dev/null
+++ b/arch/ia64/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_IA64_VMALLOC_H
+#define _ASM_IA64_VMALLOC_H
+
+#endif /* _ASM_IA64_VMALLOC_H */
diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h
new file mode 100644
index 000000000..673051bf9
--- /dev/null
+++ b/arch/ia64/include/asm/xor.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Optimized RAID-5 checksumming functions for IA-64.
+ */
+
+
+extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+
+static struct xor_block_template xor_block_ia64 = {
+ .name = "ia64",
+ .do_2 = xor_ia64_2,
+ .do_3 = xor_ia64_3,
+ .do_4 = xor_ia64_4,
+ .do_5 = xor_ia64_5,
+};
+
+#define XOR_TRY_TEMPLATES xor_speed(&xor_block_ia64)
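Editor's sketch (not part of the patch): the do_N hooks of a struct xor_block_template XOR the later buffers into the first one, `bytes` bytes at a time; the hand-written ia64 routines declared above implement this, unrolled and scheduled for the pipeline. A portable C equivalent of the two-buffer case, for illustration only:

	static void xor_two_blocks(unsigned long bytes, unsigned long *p1,
				   unsigned long *p2)
	{
		unsigned long words = bytes / sizeof(unsigned long);

		while (words--)
			*p1++ ^= *p2++;	/* accumulate the XOR into the first buffer */
	}
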
diff --git a/arch/ia64/include/asm/xtp.h b/arch/ia64/include/asm/xtp.h
new file mode 100644
index 000000000..5bf1d70ad
--- /dev/null
+++ b/arch/ia64/include/asm/xtp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_XTP_H
+#define _ASM_IA64_XTP_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET 0x1e0008
+
+#define SMP_IRQ_REDIRECTION (1 << 0)
+#define SMP_IPI_REDIRECTION (1 << 1)
+
+extern unsigned char smp_int_redirect;
+
+/*
+ * XTP control functions:
+ * min_xtp : route all interrupts to this CPU
+ * normal_xtp: nominal XTP value
+ * max_xtp : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_IA64_XTP_H */
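Editor's sketch (not part of the patch): a typical use of the XTP helpers above is to bias the chipset's interrupt routing while a CPU is busy or going quiet, then restore the nominal priority; the caller below is hypothetical and for illustration only.

	static void do_heavy_uninterruptible_work(void)
	{
		max_xtp();	/* steer external interrupts away from this CPU */
		/* ... latency-sensitive or offline work ... */
		normal_xtp();	/* resume taking a normal share of interrupts */
	}
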
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..3a1341e35
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += unistd_64.h
diff --git a/arch/ia64/include/uapi/asm/auxvec.h b/arch/ia64/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..09969a5d2
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/auxvec.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_AUXVEC_H
+#define _ASM_IA64_AUXVEC_H
+
+/*
+ * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
+ * them, start the architecture-specific ones at 32.
+ */
+#define AT_SYSINFO 32
+#define AT_SYSINFO_EHDR 33
+
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
+#endif /* _ASM_IA64_AUXVEC_H */
diff --git a/arch/ia64/include/uapi/asm/bitsperlong.h b/arch/ia64/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..1146d5556
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_IA64_BITSPERLONG_H
+#define __ASM_IA64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_IA64_BITSPERLONG_H */
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h
new file mode 100644
index 000000000..4ca110f0a
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/break.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB 0x80100
+#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL 0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/uapi/asm/byteorder.h b/arch/ia64/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..f85d0faaa
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/byteorder.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
new file mode 100644
index 000000000..d69c97993
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size) \
+({ \
+ unsigned long __xchg_result; \
+ \
+ switch (size) { \
+ case 1: \
+ __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ break; \
+ \
+ case 2: \
+ __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ break; \
+ \
+ case 4: \
+ __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ break; \
+ \
+ case 8: \
+ __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ break; \
+ default: \
+ ia64_xchg_called_with_bad_pointer(); \
+ } \
+ __xchg_result; \
+})
+
+#define xchg(ptr, x) \
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size) \
+({ \
+ __u64 _o_, _r_; \
+ \
+ switch (size) { \
+ case 1: \
+ _o_ = (__u8) (long) (old); \
+ break; \
+ case 2: \
+ _o_ = (__u16) (long) (old); \
+ break; \
+ case 4: \
+ _o_ = (__u32) (long) (old); \
+ break; \
+ case 8: \
+ _o_ = (__u64) (long) (old); \
+ break; \
+ default: \
+ break; \
+ } \
+ switch (size) { \
+ case 1: \
+ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ break; \
+ \
+ case 2: \
+ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ break; \
+ \
+ case 4: \
+ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ break; \
+ \
+ case 8: \
+ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ break; \
+ \
+ default: \
+ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
+ break; \
+ } \
+ (__typeof__(old)) _r_; \
+})
+
+#define cmpxchg_acq(ptr, o, n) \
+ ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n) \
+ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release semantics and did a full fence all the time.
+ * Unfortunately, a lot of badly written code that used .acq when it
+ * really wanted .rel became legacy out in the wild - so when we made
+ * a cpu that strictly did the .acq or .rel, all that code started
+ * breaking - so we had to back-pedal and keep the "legacy" full-fence behavior :-(
+ */
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg64
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v) \
+do { \
+ if (_cmpxchg_bugcheck_count-- <= 0) { \
+ void *ip; \
+ extern int printk(const char *fmt, ...); \
+ ip = (void *) ia64_getreg(_IA64_REG_IP); \
+ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+ break; \
+ } \
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
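Editor's sketch (not part of the patch): cmpxchg_acq() returns the value found in memory, so the usual way to build an arbitrary atomic read-modify-write on top of it is a retry loop that recomputes the new value whenever another CPU won the race; the helper name below is hypothetical.

	static inline unsigned long atomic_or_return(unsigned long *word,
						     unsigned long bits)
	{
		unsigned long old, new;

		do {
			old = *word;
			new = old | bits;
		} while (cmpxchg_acq(word, old, new) != old);	/* retry if someone raced us */

		return new;
	}
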
diff --git a/arch/ia64/include/uapi/asm/fcntl.h b/arch/ia64/include/uapi/asm/fcntl.h
new file mode 100644
index 000000000..7b95523ef
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/fcntl.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_FCNTL_H
+#define _ASM_IA64_FCNTL_H
+/*
+ * Modified 1998-2000
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#define force_o_largefile() \
+ (personality(current->personality) != PER_LINUX32)
+
+#include <linux/personality.h>
+#include <asm-generic/fcntl.h>
+
+#endif /* _ASM_IA64_FCNTL_H */
diff --git a/arch/ia64/include/uapi/asm/fpu.h b/arch/ia64/include/uapi/asm/fpu.h
new file mode 100644
index 000000000..0df392982
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/fpu.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_FPU_H
+#define _ASM_IA64_FPU_H
+
+/*
+ * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/types.h>
+
+/* floating point status register: */
+#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
+#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
+#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
+#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
+#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
+#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
+#define FPSR_S0(x) ((x) << 6)
+#define FPSR_S1(x) ((x) << 19)
+#define FPSR_S2(x) (__IA64_UL(x) << 32)
+#define FPSR_S3(x) (__IA64_UL(x) << 45)
+
+/* floating-point status field controls: */
+#define FPSF_FTZ (1 << 0) /* flush-to-zero */
+#define FPSF_WRE (1 << 1) /* widest-range exponent */
+#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
+#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
+#define FPSF_TD (1 << 6) /* trap disabled */
+
+/* floating-point status field flags: */
+#define FPSF_V (1 << 7) /* invalid operation flag */
+#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
+#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
+#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
+#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
+#define FPSF_I (1 << 12) /* inexact (IEEE) flag */
+
+/* floating-point rounding control: */
+#define FPRC_NEAREST 0x0
+#define FPRC_NEGINF 0x1
+#define FPRC_POSINF 0x2
+#define FPRC_TRUNC 0x3
+
+#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
+
+/* This default value is the same as HP-UX uses. Don't change it
+ without a very good reason. */
+#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
+ | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
+ | FPSR_S0 (FPSF_DEFAULT) \
+ | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
+ | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
+ | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
+
+# ifndef __ASSEMBLY__
+
+struct ia64_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_FPU_H */
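Editor's sketch (not part of the patch): the FPSR is built up from per-status-field images, so changing, say, the rounding mode of status field 0 means masking out its FPSF_RC bits and OR-ing in a new value through FPSR_S0(); FPSR_DEFAULT is the starting value defined in this header.

	unsigned long fpsr = FPSR_DEFAULT;

	fpsr &= ~FPSR_S0(FPSF_RC(0x3));		/* clear sf0 rounding control (FPSR bits 10-11) */
	fpsr |=  FPSR_S0(FPSF_RC(FPRC_TRUNC));	/* select round-toward-zero for sf0 */
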
diff --git a/arch/ia64/include/uapi/asm/gcc_intrin.h b/arch/ia64/include/uapi/asm/gcc_intrin.h
new file mode 100644
index 000000000..ecfa3eadb
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/gcc_intrin.h
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+#ifndef _UAPI_ASM_IA64_GCC_INTRIN_H
+#define _UAPI_ASM_IA64_GCC_INTRIN_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* define this macro to get some asm stmts included in 'c' files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier() asm volatile ("":::"memory")
+
+#define ia64_stop() asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+
+#define ia64_setreg(regnum, val) \
+({ \
+ switch (regnum) { \
+ case _IA64_REG_PSR_L: \
+ asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov ar%0=%1" :: \
+ "i" (regnum - _IA64_REG_AR_KR0), \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov cr%0=%1" :: \
+ "i" (regnum - _IA64_REG_CR_DCR), \
+ "r"(val): "memory" ); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov r12=%0" :: \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_GP: \
+ asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
+ break; \
+ default: \
+ ia64_bad_param_for_setreg(); \
+ break; \
+ } \
+})
+
+#define ia64_getreg(regnum) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (regnum) { \
+ case _IA64_REG_GP: \
+ asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_IP: \
+ asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_PSR: \
+ asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_TP: /* for current() */ \
+ ia64_intri_res = ia64_r13; \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
+ : "i"(regnum - _IA64_REG_AR_KR0)); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
+ : "i" (regnum - _IA64_REG_CR_DCR)); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
+ break; \
+ default: \
+ ia64_bad_param_for_getreg(); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode) \
+({ \
+ switch (mode) { \
+ case ia64_hint_pause: \
+ asm volatile ("hint @pause" ::: "memory"); \
+ break; \
+ } \
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix 8
+#define ia64_mux1_shuf 9
+#define ia64_mux1_alt 10
+#define ia64_mux1_rev 11
+
+#define ia64_mux1(x, mode) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (mode) { \
+ case ia64_mux1_brcst: \
+ asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_mix: \
+ asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_shuf: \
+ asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_alt: \
+ asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_rev: \
+ asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
+
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define ia64_popcnt(x) __builtin_popcountl(x)
+#else
+# define ia64_popcnt(x) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+ \
+ ia64_intri_res; \
+ })
+#endif
+
+#define ia64_getf_exp(x) \
+({ \
+ long ia64_intri_res; \
+ \
+ asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_shrp(a, b, count) \
+({ \
+ __u64 ia64_intri_res; \
+ asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
+ ia64_intri_res; \
+})
+
+#define ia64_ldfs(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfd(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfe(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf8(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf_fill(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_st4_rel_nta(m, val) \
+({ \
+ asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
+})
+
+#define ia64_stfs(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfd(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfe(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf8(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf_spill(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_fetchadd4_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd4_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg1(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg1 %0=[%1],%2" \
+ : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg2(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg4(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg8(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_mf() asm volatile ("mf" ::: "memory")
+#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_thash(addr) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define ia64_dv_serialize_data() asm volatile (".serialize.data");
+# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
+#else
+# define ia64_dv_serialize_data()
+# define ia64_dv_serialize_instruction()
+#endif
+
+#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr) \
+({ \
+ unsigned long ia64_pa; \
+ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
+ ia64_pa; \
+})
+
+#define __ia64_set_dbr(index, val) \
+ asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val) \
+ asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val) \
+ asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val) \
+ asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val) \
+ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_rr(index, val) \
+ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_get_cpuid(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
+ ia64_intri_res; \
+})
+
+#define __ia64_get_dbr(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_ibr(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pkr(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pmc(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+
+#define ia64_get_pmd(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_rr(index) \
+({ \
+ unsigned long ia64_intri_res; \
+ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_ptcga(addr, size) \
+do { \
+ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+} while (0)
+
+#define ia64_ptcl(addr, size) \
+do { \
+ asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+} while (0)
+
+#define ia64_ptri(addr, size) \
+ asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size) \
+ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ttag(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none 0
+#define ia64_lfhint_nt1 1
+#define ia64_lfhint_nt2 2
+#define ia64_lfhint_nta 3
+
+#define ia64_lfetch(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_intrin_local_irq_restore(x) \
+do { \
+ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
+ "(p6) ssm psr.i;" \
+ "(p7) rsm psr.i;;" \
+ "(p6) srlz.d" \
+ :: "r"((x)) : "p6", "p7", "memory"); \
+} while (0)
+
+#endif /* _UAPI_ASM_IA64_GCC_INTRIN_H */
diff --git a/arch/ia64/include/uapi/asm/ia64regs.h b/arch/ia64/include/uapi/asm/ia64regs.h
new file mode 100644
index 000000000..d7d10cec8
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ia64regs.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP 1016 /* getreg only */
+#define _IA64_REG_PSR 1019
+#define _IA64_REG_PSR_L 1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP 1025 /* R1 */
+#define _IA64_REG_R8 1032 /* R8 */
+#define _IA64_REG_R9 1033 /* R9 */
+#define _IA64_REG_SP 1036 /* R12 */
+#define _IA64_REG_TP 1037 /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0 3072
+#define _IA64_REG_AR_KR1 3073
+#define _IA64_REG_AR_KR2 3074
+#define _IA64_REG_AR_KR3 3075
+#define _IA64_REG_AR_KR4 3076
+#define _IA64_REG_AR_KR5 3077
+#define _IA64_REG_AR_KR6 3078
+#define _IA64_REG_AR_KR7 3079
+#define _IA64_REG_AR_RSC 3088
+#define _IA64_REG_AR_BSP 3089
+#define _IA64_REG_AR_BSPSTORE 3090
+#define _IA64_REG_AR_RNAT 3091
+#define _IA64_REG_AR_FCR 3093
+#define _IA64_REG_AR_EFLAG 3096
+#define _IA64_REG_AR_CSD 3097
+#define _IA64_REG_AR_SSD 3098
+#define _IA64_REG_AR_CFLAG 3099
+#define _IA64_REG_AR_FSR 3100
+#define _IA64_REG_AR_FIR 3101
+#define _IA64_REG_AR_FDR 3102
+#define _IA64_REG_AR_CCV 3104
+#define _IA64_REG_AR_UNAT 3108
+#define _IA64_REG_AR_FPSR 3112
+#define _IA64_REG_AR_ITC 3116
+#define _IA64_REG_AR_PFS 3136
+#define _IA64_REG_AR_LC 3137
+#define _IA64_REG_AR_EC 3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR 4096
+#define _IA64_REG_CR_ITM 4097
+#define _IA64_REG_CR_IVA 4098
+#define _IA64_REG_CR_PTA 4104
+#define _IA64_REG_CR_IPSR 4112
+#define _IA64_REG_CR_ISR 4113
+#define _IA64_REG_CR_IIP 4115
+#define _IA64_REG_CR_IFA 4116
+#define _IA64_REG_CR_ITIR 4117
+#define _IA64_REG_CR_IIPA 4118
+#define _IA64_REG_CR_IFS 4119
+#define _IA64_REG_CR_IIM 4120
+#define _IA64_REG_CR_IHA 4121
+#define _IA64_REG_CR_LID 4160
+#define _IA64_REG_CR_IVR 4161 /* getreg only */
+#define _IA64_REG_CR_TPR 4162
+#define _IA64_REG_CR_EOI 4163
+#define _IA64_REG_CR_IRR0 4164 /* getreg only */
+#define _IA64_REG_CR_IRR1 4165 /* getreg only */
+#define _IA64_REG_CR_IRR2 4166 /* getreg only */
+#define _IA64_REG_CR_IRR3 4167 /* getreg only */
+#define _IA64_REG_CR_ITV 4168
+#define _IA64_REG_CR_PMV 4169
+#define _IA64_REG_CR_CMCV 4170
+#define _IA64_REG_CR_LRR0 4176
+#define _IA64_REG_CR_LRR1 4177
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
+#define _IA64_REG_INDR_DBR 9001
+#define _IA64_REG_INDR_IBR 9002
+#define _IA64_REG_INDR_PKR 9003
+#define _IA64_REG_INDR_PMC 9004
+#define _IA64_REG_INDR_PMD 9005
+#define _IA64_REG_INDR_RR 9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
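Editor's sketch (not part of the patch): these register numbers are meant to be used as compile-time constants with the ia64_getreg()/ia64_setreg() intrinsics defined in gcc_intrin.h above, so each access collapses to a single mov; two read examples for illustration.

	unsigned long ip  = ia64_getreg(_IA64_REG_IP);      /* current instruction pointer */
	unsigned long itc = ia64_getreg(_IA64_REG_AR_ITC);  /* interval time counter (ar.itc) */
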
diff --git a/arch/ia64/include/uapi/asm/intel_intrin.h b/arch/ia64/include/uapi/asm/intel_intrin.h
new file mode 100644
index 000000000..dc1884dc5
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/intel_intrin.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_INTEL_INTRIN_H
+#define _ASM_IA64_INTEL_INTRIN_H
+/*
+ * Intel Compiler Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
+ *
+ */
+#include <ia64intrin.h>
+
+#define ia64_barrier() __memory_barrier()
+
+#define ia64_stop() /* Nothing: As of now stop bit is generated for each
+ * intrinsic
+ */
+
+#define ia64_getreg __getReg
+#define ia64_setreg __setReg
+
+#define ia64_hint __hint
+#define ia64_hint_pause __hint_pause
+
+#define ia64_mux1_brcst _m64_mux1_brcst
+#define ia64_mux1_mix _m64_mux1_mix
+#define ia64_mux1_shuf _m64_mux1_shuf
+#define ia64_mux1_alt _m64_mux1_alt
+#define ia64_mux1_rev _m64_mux1_rev
+
+#define ia64_mux1(x,v) _m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
+#define ia64_popcnt _m64_popcnt
+#define ia64_getf_exp __getf_exp
+#define ia64_shrp _m64_shrp
+
+#define ia64_tpa __tpa
+#define ia64_invala __invala
+#define ia64_invala_gr __invala_gr
+#define ia64_invala_fr __invala_fr
+#define ia64_nop __nop
+#define ia64_sum __sum
+#define ia64_ssm __ssm
+#define ia64_rum __rum
+#define ia64_rsm __rsm
+#define ia64_fc __fc
+
+#define ia64_ldfs __ldfs
+#define ia64_ldfd __ldfd
+#define ia64_ldfe __ldfe
+#define ia64_ldf8 __ldf8
+#define ia64_ldf_fill __ldf_fill
+
+#define ia64_stfs __stfs
+#define ia64_stfd __stfd
+#define ia64_stfe __stfe
+#define ia64_stf8 __stf8
+#define ia64_stf_spill __stf_spill
+
+#define ia64_mf __mf
+#define ia64_mfa __mfa
+
+#define ia64_fetchadd4_acq __fetchadd4_acq
+#define ia64_fetchadd4_rel __fetchadd4_rel
+#define ia64_fetchadd8_acq __fetchadd8_acq
+#define ia64_fetchadd8_rel __fetchadd8_rel
+
+#define ia64_xchg1 _InterlockedExchange8
+#define ia64_xchg2 _InterlockedExchange16
+#define ia64_xchg4 _InterlockedExchange
+#define ia64_xchg8 _InterlockedExchange64
+
+#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
+
+#define __ia64_set_dbr(index, val) \
+ __setIndReg(_IA64_REG_INDR_DBR, index, val)
+#define ia64_set_ibr(index, val) \
+ __setIndReg(_IA64_REG_INDR_IBR, index, val)
+#define ia64_set_pkr(index, val) \
+ __setIndReg(_IA64_REG_INDR_PKR, index, val)
+#define ia64_set_pmc(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMC, index, val)
+#define ia64_set_pmd(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMD, index, val)
+#define ia64_set_rr(index, val) \
+ __setIndReg(_IA64_REG_INDR_RR, index, val)
+
+#define ia64_get_cpuid(index) \
+ __getIndReg(_IA64_REG_INDR_CPUID, index)
+#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
+#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
+#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
+#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
+#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
+#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
+
+#define ia64_srlz_d __dsrlz
+#define ia64_srlz_i __isrlz
+
+#define ia64_dv_serialize_data()
+#define ia64_dv_serialize_instruction()
+
+#define ia64_st1_rel __st1_rel
+#define ia64_st2_rel __st2_rel
+#define ia64_st4_rel __st4_rel
+#define ia64_st8_rel __st8_rel
+
+/* FIXME: need st4.rel.nta intrinsic */
+#define ia64_st4_rel_nta __st4_rel
+
+#define ia64_ld1_acq __ld1_acq
+#define ia64_ld2_acq __ld2_acq
+#define ia64_ld4_acq __ld4_acq
+#define ia64_ld8_acq __ld8_acq
+
+#define ia64_sync_i __synci
+#define ia64_thash __thash
+#define ia64_ttag __ttag
+#define ia64_itcd __itcd
+#define ia64_itci __itci
+#define ia64_itrd __itrd
+#define ia64_itri __itri
+#define ia64_ptce __ptce
+#define ia64_ptcl __ptcl
+#define ia64_ptcg __ptcg
+#define ia64_ptcga __ptcga
+#define ia64_ptri __ptri
+#define ia64_ptrd __ptrd
+#define ia64_dep_mi _m64_dep_mi
+
+/* Values for lfhint in __lfetch and __lfetch_fault */
+
+#define ia64_lfhint_none __lfhint_none
+#define ia64_lfhint_nt1 __lfhint_nt1
+#define ia64_lfhint_nt2 __lfhint_nt2
+#define ia64_lfhint_nta __lfhint_nta
+
+#define ia64_lfetch __lfetch
+#define ia64_lfetch_excl __lfetch_excl
+#define ia64_lfetch_fault __lfetch_fault
+#define ia64_lfetch_fault_excl __lfetch_fault_excl
+
+#define ia64_intrin_local_irq_restore(x) \
+do { \
+ if ((x) != 0) { \
+ ia64_ssm(IA64_PSR_I); \
+ ia64_srlz_d(); \
+ } else { \
+ ia64_rsm(IA64_PSR_I); \
+ } \
+} while (0)
+
+#define __builtin_trap() __break(0);
+
+#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/arch/ia64/include/uapi/asm/intrinsics.h b/arch/ia64/include/uapi/asm/intrinsics.h
new file mode 100644
index 000000000..a0e0a064f
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/intrinsics.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _UAPI_ASM_IA64_INTRINSICS_H
+#define _UAPI_ASM_IA64_INTRINSICS_H
+
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+#include <asm/cmpxchg.h>
+
+#define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \
+do { \
+ ia64_set_rr(0x0000000000000000UL, (val0)); \
+ ia64_set_rr(0x2000000000000000UL, (val1)); \
+ ia64_set_rr(0x4000000000000000UL, (val2)); \
+ ia64_set_rr(0x6000000000000000UL, (val3)); \
+ ia64_set_rr(0x8000000000000000UL, (val4)); \
+} while (0)
+
+/*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+ */
+extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
+extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
+
+#define IA64_FETCHADD(tmp,v,n,sz,sem) \
+({ \
+ switch (sz) { \
+ case 4: \
+ tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
+ break; \
+ \
+ case 8: \
+ tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
+ break; \
+ \
+ default: \
+ __bad_size_for_ia64_fetch_and_add(); \
+ } \
+})
+
+#define ia64_fetchadd(i,v,sem) \
+({ \
+ __u64 _tmp; \
+ volatile __typeof__(*(v)) *_v = (v); \
+ /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
+ if ((i) == -16) \
+ IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \
+ else if ((i) == -8) \
+ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \
+ else if ((i) == -4) \
+ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \
+ else if ((i) == -1) \
+ IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \
+ else if ((i) == 1) \
+ IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \
+ else if ((i) == 4) \
+ IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \
+ else if ((i) == 8) \
+ IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \
+ else if ((i) == 16) \
+ IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \
+ else \
+ _tmp = __bad_increment_for_ia64_fetch_and_add(); \
+ (__typeof__(*(v))) (_tmp); /* return old value */ \
+})
+
+#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
+
+#endif
+
+#endif /* _UAPI_ASM_IA64_INTRINSICS_H */
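Editor's sketch (not part of the patch): ia64_fetch_and_add() returns the new value (the old value plus the increment), so a reference count can be dropped and tested for zero in one atomic step; the helper below is hypothetical.

	static inline int put_ref(unsigned long *refcount)
	{
		/* non-zero when this caller released the last reference */
		return ia64_fetch_and_add(-1, refcount) == 0;
	}
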
diff --git a/arch/ia64/include/uapi/asm/mman.h b/arch/ia64/include/uapi/asm/mman.h
new file mode 100644
index 000000000..ce0cc3d75
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/mman.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_MMAN_H
+#define _UAPI_ASM_IA64_MMAN_H
+
+
+#include <asm-generic/mman.h>
+
+#define MAP_GROWSUP 0x0200 /* register stack-like segment */
+
+
+#endif /* _UAPI_ASM_IA64_MMAN_H */
diff --git a/arch/ia64/include/uapi/asm/param.h b/arch/ia64/include/uapi/asm/param.h
new file mode 100644
index 000000000..123ab4594
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/param.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_PARAM_H
+#define _UAPI_ASM_IA64_PARAM_H
+
+
+#define EXEC_PAGESIZE 65536
+
+#ifndef NOGROUP
+# define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#ifndef __KERNEL__
+ /*
+ * Technically, this is wrong, but some old apps still refer to it. The proper way to
+ * get the HZ value is via sysconf(_SC_CLK_TCK).
+ */
+# define HZ 1024
+#endif
+
+#endif /* _UAPI_ASM_IA64_PARAM_H */
diff --git a/arch/ia64/include/uapi/asm/perfmon.h b/arch/ia64/include/uapi/asm/perfmon.h
new file mode 100644
index 000000000..017548365
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/perfmon.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ */
+
+#ifndef _UAPI_ASM_IA64_PERFMON_H
+#define _UAPI_ASM_IA64_PERFMON_H
+
+/*
+ * perfmon commands supported on all CPU models
+ */
+#define PFM_WRITE_PMCS 0x01
+#define PFM_WRITE_PMDS 0x02
+#define PFM_READ_PMDS 0x03
+#define PFM_STOP 0x04
+#define PFM_START 0x05
+#define PFM_ENABLE 0x06 /* obsolete */
+#define PFM_DISABLE 0x07 /* obsolete */
+#define PFM_CREATE_CONTEXT 0x08
+#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */
+#define PFM_RESTART 0x0a
+#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
+#define PFM_GET_FEATURES 0x0c
+#define PFM_DEBUG 0x0d
+#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
+#define PFM_GET_PMC_RESET_VAL 0x0f
+#define PFM_LOAD_CONTEXT 0x10
+#define PFM_UNLOAD_CONTEXT 0x11
+
+/*
+ * PMU model specific commands (may not be supported on all PMU models)
+ */
+#define PFM_WRITE_IBRS 0x20
+#define PFM_WRITE_DBRS 0x21
+
+/*
+ * context flags
+ */
+#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
+#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
+#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
+
+/*
+ * event set flags
+ */
+#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
+
+/*
+ * PMC flags
+ */
+#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
+#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
+
+/*
+ * PMD/PMC/IBR/DBR return flags (ignored on input)
+ *
+ * Those flags are used on output and must be checked in case EAGAIN is returned
+ * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
+ */
+#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
+#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
+#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
+
+#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
+
+typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
+
+/*
+ * Request structure used to define a context
+ */
+typedef struct {
+ pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
+ unsigned long ctx_flags; /* noblock/block */
+ unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
+ unsigned short ctx_reserved1; /* for future use */
+ int ctx_fd; /* return arg: unique identification for context */
+ void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, if used */
+ unsigned long ctx_reserved2[11];/* for future use */
+} pfarg_context_t;
+
+/*
+ * Request structure used to write/read a PMC or PMD
+ */
+typedef struct {
+ unsigned int reg_num; /* which register */
+ unsigned short reg_set; /* event set for this register */
+ unsigned short reg_reserved1; /* for future use */
+
+ unsigned long reg_value; /* initial pmc/pmd value */
+ unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
+
+ unsigned long reg_long_reset; /* reset after buffer overflow notification */
+ unsigned long reg_short_reset; /* reset after counter overflow */
+
+ unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
+ unsigned long reg_random_seed; /* seed value when randomization is used */
+ unsigned long reg_random_mask; /* bitmask used to limit random value */
+ unsigned long reg_last_reset_val;/* return: PMD last reset value */
+
+ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
+ unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
+
+ unsigned long reg_reserved2[3]; /* for future use */
+} pfarg_reg_t;
+
+typedef struct {
+ unsigned int dbreg_num; /* which debug register */
+ unsigned short dbreg_set; /* event set for this register */
+ unsigned short dbreg_reserved1; /* for future use */
+ unsigned long dbreg_value; /* value for debug register */
+ unsigned long dbreg_flags; /* return: dbreg error */
+ unsigned long dbreg_reserved2[1]; /* for future use */
+} pfarg_dbreg_t;
+
+typedef struct {
+ unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
+ unsigned int ft_reserved; /* reserved for future use */
+ unsigned long reserved[4]; /* for future use */
+} pfarg_features_t;
+
+typedef struct {
+ pid_t load_pid; /* process to load the context into */
+ unsigned short load_set; /* first event set to load */
+ unsigned short load_reserved1; /* for future use */
+ unsigned long load_reserved2[3]; /* for future use */
+} pfarg_load_t;
+
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
+ unsigned short msg_active_set; /* active set at the time of overflow */
+ unsigned short msg_reserved1; /* for future use */
+ unsigned int msg_reserved2; /* for future use */
+ unsigned long msg_tstamp; /* for perf tuning/debug */
+} pfm_ovfl_msg_t;
+
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_end_msg_t;
+
+typedef struct {
+ int msg_type; /* type of the message */
+ int msg_ctx_fd; /* unique identifier for the context */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_gen_msg_t;
+
+#define PFM_MSG_OVFL 1 /* an overflow happened */
+#define PFM_MSG_END 2 /* task to which context was attached ended */
+
+typedef union {
+ pfm_ovfl_msg_t pfm_ovfl_msg;
+ pfm_end_msg_t pfm_end_msg;
+ pfm_gen_msg_t pfm_gen_msg;
+} pfm_msg_t;
+
+/*
+ * Define the version numbers for both perfmon as a whole and the sampling buffer format.
+ */
+#define PFM_VERSION_MAJ 2U
+#define PFM_VERSION_MIN 0U
+#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
+#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
+#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
+
+
+/*
+ * miscellaneous architected definitions
+ */
+#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
+#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
+#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
+
+
+#endif /* _UAPI_ASM_IA64_PERFMON_H */
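Editor's sketch (not part of the patch): the version macros pack the major number in the upper and the minor number in the lower 16 bits, so an interface user can compare the major half of the ft_version value returned by a PFM_GET_FEATURES request against its own PFM_VERSION; the caller obtaining `ft` is assumed here.

	static int perfmon_version_compatible(const pfarg_features_t *ft)
	{
		/* ft is assumed to have been filled in by a PFM_GET_FEATURES request */
		return PFM_VERSION_MAJOR(ft->ft_version) == PFM_VERSION_MAJOR(PFM_VERSION);
	}
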
diff --git a/arch/ia64/include/uapi/asm/perfmon_default_smpl.h b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
new file mode 100644
index 000000000..d3f36aff0
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/perfmon_default_smpl.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This file implements the default sampling buffer format
+ * for Linux/ia64 perfmon subsystem.
+ */
+#ifndef __PERFMON_DEFAULT_SMPL_H__
+#define __PERFMON_DEFAULT_SMPL_H__ 1
+
+#define PFM_DEFAULT_SMPL_UUID { \
+ 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
+
+/*
+ * format specific parameters (passed at context creation)
+ */
+typedef struct {
+ unsigned long buf_size; /* size of the buffer in bytes */
+ unsigned int flags; /* buffer specific flags */
+ unsigned int res1; /* for future use */
+ unsigned long reserved[2]; /* for future use */
+} pfm_default_smpl_arg_t;
+
+/*
+ * combined context+format specific structure. Can be passed
+ * to PFM_CONTEXT_CREATE
+ */
+typedef struct {
+ pfarg_context_t ctx_arg;
+ pfm_default_smpl_arg_t buf_arg;
+} pfm_default_smpl_ctx_arg_t;
+
+/*
+ * This header is at the beginning of the sampling buffer returned to the user.
+ * It is directly followed by the first record.
+ */
+typedef struct {
+ unsigned long hdr_count; /* how many valid entries */
+ unsigned long hdr_cur_offs; /* current offset from top of buffer */
+ unsigned long hdr_reserved2; /* reserved for future use */
+
+ unsigned long hdr_overflows; /* how many times the buffer overflowed */
+ unsigned long hdr_buf_size; /* how many bytes in the buffer */
+
+ unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
+ unsigned int hdr_reserved1; /* for future use */
+ unsigned long hdr_reserved[10]; /* for future use */
+} pfm_default_smpl_hdr_t;
+
+/*
+ * Entry header in the sampling buffer. The header is directly followed
+ * by the values of the PMD registers of interest, saved in increasing
+ * index order: PMD4, PMD5, and so on. How many PMDs are present depends
+ * on how the session was programmed.
+ *
+ * In the case where multiple counters overflow at the same time, multiple
+ * entries are written consecutively.
+ *
+ * The last_reset_val member indicates the initial value of the overflowed PMD.
+ */
+typedef struct {
+ int pid; /* thread id (for NPTL, this is gettid()) */
+ unsigned char reserved1[3]; /* reserved for future use */
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+
+ unsigned long last_reset_val; /* initial value of overflowed PMD */
+	unsigned long	ip;			/* where the overflow interrupt happened */
+ unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
+
+ unsigned short cpu; /* cpu on which the overflow occurred */
+ unsigned short set; /* event set active when overflow occurred */
+ int tgid; /* thread group id (for NPTL, this is getpid()) */
+} pfm_default_smpl_entry_t;
+
+#define PFM_DEFAULT_MAX_PMDS		64 /* how many PMDs the data structures can hold (bits in one unsigned long) */
+#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
+#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
+
+#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
+#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
+#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
+
+#endif /* __PERFMON_DEFAULT_SMPL_H__ */
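The header/entry layout above implies a simple walk of the default-format sampling buffer: pfm_default_smpl_hdr_t sits at the start, and each record is a pfm_default_smpl_entry_t followed by the sampled PMD values, 8 bytes each, in increasing index order. The sketch below assumes the buffer has already been mapped (buf) and that the caller knows how many PMDs were requested per sample (n_smpl_pmds); both are assumptions supplied by the caller, not part of this header.

#include <stdio.h>
#include <asm/perfmon.h>
#include <asm/perfmon_default_smpl.h>

/* Walk one snapshot of the default-format sampling buffer. */
static void walk_smpl_buffer(void *buf, unsigned int n_smpl_pmds)
{
	pfm_default_smpl_hdr_t *hdr = buf;
	/* each record: fixed entry header + one 8-byte value per sampled PMD */
	size_t entry_size = sizeof(pfm_default_smpl_entry_t) +
			    n_smpl_pmds * sizeof(unsigned long);
	char *pos = (char *)(hdr + 1);
	unsigned long i;

	for (i = 0; i < hdr->hdr_count; i++) {
		pfm_default_smpl_entry_t *ent = (pfm_default_smpl_entry_t *)pos;
		unsigned long *pmds = (unsigned long *)(ent + 1);

		printf("pid=%d ovfl_pmd=%u ip=0x%lx first_pmd=0x%lx\n",
		       ent->pid, (unsigned int)ent->ovfl_pmd, ent->ip,
		       n_smpl_pmds ? pmds[0] : 0UL);
		pos += entry_size;
	}
}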
diff --git a/arch/ia64/include/uapi/asm/posix_types.h b/arch/ia64/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000..bded40f7d
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/posix_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_POSIX_TYPES_H
+#define _ASM_IA64_POSIX_TYPES_H
+
+typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
+
+#include <asm-generic/posix_types.h>
+
+#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/arch/ia64/include/uapi/asm/ptrace.h b/arch/ia64/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..f52655b44
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ptrace.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Arun Sharma <arun.sharma@intel.com>
+ *
+ * 12/07/98 S. Eranian added pt_regs & switch_stack
+ * 12/21/98 D. Mosberger updated to match latest code
+ * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
+ *
+ */
+#ifndef _UAPI_ASM_IA64_PTRACE_H
+#define _UAPI_ASM_IA64_PTRACE_H
+
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ * +----------------------+ ------- IA64_STK_OFFSET
+ * | | ^
+ * | struct pt_regs | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * | memory stack | |
+ * | (growing downwards) | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * +----------------------+ |
+ * | struct switch_stack | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * | register stack | |
+ * | (growing upwards) | |
+ * | | |
+ * +----------------------+ | --- IA64_RBS_OFFSET
+ * | struct thread_info | | ^
+ * +----------------------+ | |
+ * | | | |
+ * | struct task_struct | | |
+ * current -> | | | |
+ * +----------------------+ -------
+ *
+ * Note that ar.ec is not saved explicitly in pt_regs or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+
+#include <asm/fpu.h>
+
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating point registers because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+struct pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ /*
+ * interrupted task's function state; if bit 63 is cleared, it
+ * contains syscall's ar.pfs.pfm:
+ */
+ unsigned long cr_ifs;
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct ia64_fpreg f6; /* scratch */
+ struct ia64_fpreg f7; /* scratch */
+ struct ia64_fpreg f8; /* scratch */
+ struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
+};
+
+/*
+ * This structure contains the additional registers that need to be
+ * preserved across a context switch. It generally consists of the
+ * "preserved" registers.
+ */
+struct switch_stack {
+ unsigned long caller_unat; /* user NaT collection register (preserved) */
+ unsigned long ar_fpsr; /* floating-point status register */
+
+ struct ia64_fpreg f2; /* preserved */
+ struct ia64_fpreg f3; /* preserved */
+ struct ia64_fpreg f4; /* preserved */
+ struct ia64_fpreg f5; /* preserved */
+
+ struct ia64_fpreg f12; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f13; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f14; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f15; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f16; /* preserved */
+ struct ia64_fpreg f17; /* preserved */
+ struct ia64_fpreg f18; /* preserved */
+ struct ia64_fpreg f19; /* preserved */
+ struct ia64_fpreg f20; /* preserved */
+ struct ia64_fpreg f21; /* preserved */
+ struct ia64_fpreg f22; /* preserved */
+ struct ia64_fpreg f23; /* preserved */
+ struct ia64_fpreg f24; /* preserved */
+ struct ia64_fpreg f25; /* preserved */
+ struct ia64_fpreg f26; /* preserved */
+ struct ia64_fpreg f27; /* preserved */
+ struct ia64_fpreg f28; /* preserved */
+ struct ia64_fpreg f29; /* preserved */
+ struct ia64_fpreg f30; /* preserved */
+ struct ia64_fpreg f31; /* preserved */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+
+ unsigned long b0; /* so we can force a direct return in copy_thread */
+ unsigned long b1;
+ unsigned long b2;
+ unsigned long b3;
+ unsigned long b4;
+ unsigned long b5;
+
+ unsigned long ar_pfs; /* previous function state */
+ unsigned long ar_lc; /* loop counter (preserved) */
+ unsigned long ar_unat; /* NaT bits for r4-r7 */
+ unsigned long ar_rnat; /* RSE NaT collection register */
+ unsigned long ar_bspstore; /* RSE dirty base (preserved) */
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+};
+
+
+/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
+struct pt_all_user_regs {
+ unsigned long nat;
+ unsigned long cr_iip;
+ unsigned long cfm;
+ unsigned long cr_ipsr;
+ unsigned long pr;
+
+ unsigned long gr[32];
+ unsigned long br[8];
+ unsigned long ar[128];
+ struct ia64_fpreg fr[128];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* indices to application-registers array in pt_all_user_regs */
+#define PT_AUR_RSC 16
+#define PT_AUR_BSP 17
+#define PT_AUR_BSPSTORE 18
+#define PT_AUR_RNAT 19
+#define PT_AUR_CCV 32
+#define PT_AUR_UNAT 36
+#define PT_AUR_FPSR 40
+#define PT_AUR_PFS 64
+#define PT_AUR_LC 65
+#define PT_AUR_EC 66
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
+#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
+#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
+#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
+#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#endif /* _UAPI_ASM_IA64_PTRACE_H */
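A minimal sketch of fetching the full register set of a stopped tracee with the ia64-specific PTRACE_GETREGS request defined above. The target pid and the fact that it is already ptrace-stopped are assumptions; the pt_all_user_regs buffer is passed through ptrace()'s data argument here, which matches the kernel's ia64 ptrace handler but should be treated as an assumption (and mixing <sys/ptrace.h> with <asm/ptrace.h> may need care on a real build).

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

/* Dump a few registers of a ptrace-stopped tracee (pid assumed valid). */
static int dump_regs(pid_t pid)
{
	struct pt_all_user_regs regs;

	if (ptrace(PTRACE_GETREGS, pid, 0, &regs) == -1) {
		perror("PTRACE_GETREGS");
		return -1;
	}
	printf("ip=0x%lx psr=0x%lx bsp=0x%lx pfs=0x%lx\n",
	       regs.cr_iip, regs.cr_ipsr,
	       regs.ar[PT_AUR_BSP], regs.ar[PT_AUR_PFS]);
	return 0;
}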
diff --git a/arch/ia64/include/uapi/asm/ptrace_offsets.h b/arch/ia64/include/uapi/asm/ptrace_offsets.h
new file mode 100644
index 000000000..2847c1813
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ptrace_offsets.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_PTRACE_OFFSETS_H
+#define _ASM_IA64_PTRACE_OFFSETS_H
+
+/*
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
+ * virtual structure that would have the following definition:
+ *
+ * struct uarea {
+ * struct ia64_fpreg fph[96]; // f32-f127
+ * unsigned long nat_bits;
+ * unsigned long empty1;
+ * struct ia64_fpreg f2; // f2-f5
+ * :
+ * struct ia64_fpreg f5;
+ * struct ia64_fpreg f10; // f10-f31
+ * :
+ * struct ia64_fpreg f31;
+ * unsigned long r4; // r4-r7
+ * :
+ * unsigned long r7;
+ * unsigned long b1; // b1-b5
+ * :
+ * unsigned long b5;
+ * unsigned long ar_ec;
+ * unsigned long ar_lc;
+ * unsigned long empty2[5];
+ * unsigned long cr_ipsr;
+ * unsigned long cr_iip;
+ * unsigned long cfm;
+ * unsigned long ar_unat;
+ * unsigned long ar_pfs;
+ * unsigned long ar_rsc;
+ * unsigned long ar_rnat;
+ * unsigned long ar_bspstore;
+ * unsigned long pr;
+ * unsigned long b6;
+ * unsigned long ar_bsp;
+ * unsigned long r1;
+ * unsigned long r2;
+ * unsigned long r3;
+ * unsigned long r12;
+ * unsigned long r13;
+ * unsigned long r14;
+ * unsigned long r15;
+ * unsigned long r8;
+ * unsigned long r9;
+ * unsigned long r10;
+ * unsigned long r11;
+ * unsigned long r16;
+ * :
+ * unsigned long r31;
+ * unsigned long ar_ccv;
+ * unsigned long ar_fpsr;
+ * unsigned long b0;
+ * unsigned long b7;
+ * unsigned long f6;
+ * unsigned long f7;
+ * unsigned long f8;
+ * unsigned long f9;
+ * unsigned long ar_csd;
+ * unsigned long ar_ssd;
+ * unsigned long rsvd1[710];
+ * unsigned long dbr[8];
+ * unsigned long rsvd2[504];
+ * unsigned long ibr[8];
+ * unsigned long rsvd3[504];
+ * unsigned long pmd[4];
+ * }
+ */
+
+/* fph: */
+#define PT_F32 0x0000
+#define PT_F33 0x0010
+#define PT_F34 0x0020
+#define PT_F35 0x0030
+#define PT_F36 0x0040
+#define PT_F37 0x0050
+#define PT_F38 0x0060
+#define PT_F39 0x0070
+#define PT_F40 0x0080
+#define PT_F41 0x0090
+#define PT_F42 0x00a0
+#define PT_F43 0x00b0
+#define PT_F44 0x00c0
+#define PT_F45 0x00d0
+#define PT_F46 0x00e0
+#define PT_F47 0x00f0
+#define PT_F48 0x0100
+#define PT_F49 0x0110
+#define PT_F50 0x0120
+#define PT_F51 0x0130
+#define PT_F52 0x0140
+#define PT_F53 0x0150
+#define PT_F54 0x0160
+#define PT_F55 0x0170
+#define PT_F56 0x0180
+#define PT_F57 0x0190
+#define PT_F58 0x01a0
+#define PT_F59 0x01b0
+#define PT_F60 0x01c0
+#define PT_F61 0x01d0
+#define PT_F62 0x01e0
+#define PT_F63 0x01f0
+#define PT_F64 0x0200
+#define PT_F65 0x0210
+#define PT_F66 0x0220
+#define PT_F67 0x0230
+#define PT_F68 0x0240
+#define PT_F69 0x0250
+#define PT_F70 0x0260
+#define PT_F71 0x0270
+#define PT_F72 0x0280
+#define PT_F73 0x0290
+#define PT_F74 0x02a0
+#define PT_F75 0x02b0
+#define PT_F76 0x02c0
+#define PT_F77 0x02d0
+#define PT_F78 0x02e0
+#define PT_F79 0x02f0
+#define PT_F80 0x0300
+#define PT_F81 0x0310
+#define PT_F82 0x0320
+#define PT_F83 0x0330
+#define PT_F84 0x0340
+#define PT_F85 0x0350
+#define PT_F86 0x0360
+#define PT_F87 0x0370
+#define PT_F88 0x0380
+#define PT_F89 0x0390
+#define PT_F90 0x03a0
+#define PT_F91 0x03b0
+#define PT_F92 0x03c0
+#define PT_F93 0x03d0
+#define PT_F94 0x03e0
+#define PT_F95 0x03f0
+#define PT_F96 0x0400
+#define PT_F97 0x0410
+#define PT_F98 0x0420
+#define PT_F99 0x0430
+#define PT_F100 0x0440
+#define PT_F101 0x0450
+#define PT_F102 0x0460
+#define PT_F103 0x0470
+#define PT_F104 0x0480
+#define PT_F105 0x0490
+#define PT_F106 0x04a0
+#define PT_F107 0x04b0
+#define PT_F108 0x04c0
+#define PT_F109 0x04d0
+#define PT_F110 0x04e0
+#define PT_F111 0x04f0
+#define PT_F112 0x0500
+#define PT_F113 0x0510
+#define PT_F114 0x0520
+#define PT_F115 0x0530
+#define PT_F116 0x0540
+#define PT_F117 0x0550
+#define PT_F118 0x0560
+#define PT_F119 0x0570
+#define PT_F120 0x0580
+#define PT_F121 0x0590
+#define PT_F122 0x05a0
+#define PT_F123 0x05b0
+#define PT_F124 0x05c0
+#define PT_F125 0x05d0
+#define PT_F126 0x05e0
+#define PT_F127 0x05f0
+
+#define PT_NAT_BITS 0x0600
+
+#define PT_F2 0x0610
+#define PT_F3 0x0620
+#define PT_F4 0x0630
+#define PT_F5 0x0640
+#define PT_F10 0x0650
+#define PT_F11 0x0660
+#define PT_F12 0x0670
+#define PT_F13 0x0680
+#define PT_F14 0x0690
+#define PT_F15 0x06a0
+#define PT_F16 0x06b0
+#define PT_F17 0x06c0
+#define PT_F18 0x06d0
+#define PT_F19 0x06e0
+#define PT_F20 0x06f0
+#define PT_F21 0x0700
+#define PT_F22 0x0710
+#define PT_F23 0x0720
+#define PT_F24 0x0730
+#define PT_F25 0x0740
+#define PT_F26 0x0750
+#define PT_F27 0x0760
+#define PT_F28 0x0770
+#define PT_F29 0x0780
+#define PT_F30 0x0790
+#define PT_F31 0x07a0
+#define PT_R4 0x07b0
+#define PT_R5 0x07b8
+#define PT_R6 0x07c0
+#define PT_R7 0x07c8
+
+#define PT_B1 0x07d8
+#define PT_B2 0x07e0
+#define PT_B3 0x07e8
+#define PT_B4 0x07f0
+#define PT_B5 0x07f8
+
+#define PT_AR_EC 0x0800
+#define PT_AR_LC 0x0808
+
+#define PT_CR_IPSR 0x0830
+#define PT_CR_IIP 0x0838
+#define PT_CFM 0x0840
+#define PT_AR_UNAT 0x0848
+#define PT_AR_PFS 0x0850
+#define PT_AR_RSC 0x0858
+#define PT_AR_RNAT 0x0860
+#define PT_AR_BSPSTORE 0x0868
+#define PT_PR 0x0870
+#define PT_B6 0x0878
+#define PT_AR_BSP 0x0880 /* note: this points to the *end* of the backing store! */
+#define PT_R1 0x0888
+#define PT_R2 0x0890
+#define PT_R3 0x0898
+#define PT_R12 0x08a0
+#define PT_R13 0x08a8
+#define PT_R14 0x08b0
+#define PT_R15 0x08b8
+#define PT_R8 0x08c0
+#define PT_R9 0x08c8
+#define PT_R10 0x08d0
+#define PT_R11 0x08d8
+#define PT_R16 0x08e0
+#define PT_R17 0x08e8
+#define PT_R18 0x08f0
+#define PT_R19 0x08f8
+#define PT_R20 0x0900
+#define PT_R21 0x0908
+#define PT_R22 0x0910
+#define PT_R23 0x0918
+#define PT_R24 0x0920
+#define PT_R25 0x0928
+#define PT_R26 0x0930
+#define PT_R27 0x0938
+#define PT_R28 0x0940
+#define PT_R29 0x0948
+#define PT_R30 0x0950
+#define PT_R31 0x0958
+#define PT_AR_CCV 0x0960
+#define PT_AR_FPSR 0x0968
+#define PT_B0 0x0970
+#define PT_B7 0x0978
+#define PT_F6 0x0980
+#define PT_F7 0x0990
+#define PT_F8 0x09a0
+#define PT_F9 0x09b0
+#define PT_AR_CSD 0x09c0
+#define PT_AR_SSD 0x09c8
+
+#define PT_DBR 0x2000 /* data breakpoint registers */
+#define PT_IBR 0x3000 /* instruction breakpoint registers */
+#define PT_PMD 0x4000 /* performance monitoring counters */
+
+#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
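The PT_* offsets above index the virtual "uarea" documented at the top of this file, one word at a time, via PTRACE_PEEKUSER/POKEUSER. A short sketch of reading a single register from a tracee; pid is assumed to be an already-attached, stopped tracee.

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace_offsets.h>

/* Read the interrupted instruction pointer of a stopped tracee. */
static long peek_iip(pid_t pid)
{
	long iip;

	errno = 0;
	iip = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_CR_IIP, 0);
	if (iip == -1 && errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("cr.iip = 0x%lx\n", iip);
	return iip;
}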
diff --git a/arch/ia64/include/uapi/asm/resource.h b/arch/ia64/include/uapi/asm/resource.h
new file mode 100644
index 000000000..d488d2b22
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/resource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_RESOURCE_H
+#define _ASM_IA64_RESOURCE_H
+
+#include <asm/ustack.h>
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/arch/ia64/include/uapi/asm/rse.h b/arch/ia64/include/uapi/asm/rse.h
new file mode 100644
index 000000000..6d260af57
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/rse.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_RSE_H
+#define _ASM_IA64_RSE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Register stack engine related helper functions. This file may be
+ * used in applications, so be careful about the name-space and give
+ * some consideration to non-GNU C compilers (though __inline__ is
+ * fine).
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long *addr)
+{
+ return (((unsigned long) addr) >> 3) & 0x3f;
+}
+
+/*
+ * Return TRUE if ADDR is the address of an RNAT slot.
+ */
+static __inline__ unsigned long
+ia64_rse_is_rnat_slot (unsigned long *addr)
+{
+ return ia64_rse_slot_num(addr) == 0x3f;
+}
+
+/*
+ * Returns the address of the RNAT slot that covers the slot at
+ * address SLOT_ADDR.
+ */
+static __inline__ unsigned long *
+ia64_rse_rnat_addr (unsigned long *slot_addr)
+{
+ return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
+}
+
+/*
+ * Calculate the number of registers in the dirty partition starting at BSPSTORE and
+ * ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
+ * ar.rnat.
+ */
+static __inline__ unsigned long
+ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
+{
+ unsigned long slots = (bsp - bspstore);
+
+ return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
+}
+
+/*
+ * The inverse of the above: given bspstore and the number of
+ * registers, calculate ar.bsp.
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+
+ if (num_regs < 0)
+ delta -= 0x3e;
+ return addr + num_regs + delta/0x3f;
+}
+
+#endif /* _ASM_IA64_RSE_H */
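Since these helpers are plain __inline__ functions, applications can use them directly. The sketch below counts the registers in a dirty partition delimited by two backing-store pointers and computes where a frame of a given size would end; the pointer values themselves are assumptions supplied by the caller (for example, taken from a sigcontext).

#include <stdio.h>
#include <asm/rse.h>

/* Given the base of a backing store and the corresponding ar.bsp value,
 * report how many registers the dirty partition holds and where a frame
 * of nframe registers starting at bspstore would end. */
static void rse_report(unsigned long *bspstore, unsigned long *bsp, long nframe)
{
	unsigned long ndirty = ia64_rse_num_regs(bspstore, bsp);
	unsigned long *frame_end = ia64_rse_skip_regs(bspstore, nframe);

	printf("dirty registers: %lu, frame of %ld regs ends at %p\n",
	       ndirty, nframe, (void *)frame_end);
}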
diff --git a/arch/ia64/include/uapi/asm/setup.h b/arch/ia64/include/uapi/asm/setup.h
new file mode 100644
index 000000000..8d13ce8fb
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/setup.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __IA64_SETUP_H
+#define __IA64_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+extern struct ia64_boot_param {
+ __u64 command_line; /* physical address of command line arguments */
+ __u64 efi_systab; /* physical address of EFI system table */
+ __u64 efi_memmap; /* physical address of EFI memory map */
+ __u64 efi_memmap_size; /* size of EFI memory map */
+ __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */
+ __u32 efi_memdesc_version; /* memory descriptor version */
+ struct {
+ __u16 num_cols; /* number of columns on console output device */
+ __u16 num_rows; /* number of rows on console output device */
+ __u16 orig_x; /* cursor's x position */
+ __u16 orig_y; /* cursor's y position */
+ } console_info;
+ __u64 fpswa; /* physical address of the fpswa interface */
+ __u64 initrd_start;
+ __u64 initrd_size;
+} *ia64_boot_param;
+
+#endif
diff --git a/arch/ia64/include/uapi/asm/sigcontext.h b/arch/ia64/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..1bb6f0f2b
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/sigcontext.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_SIGCONTEXT_H
+#define _ASM_IA64_SIGCONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/fpu.h>
+
+#define IA64_SC_FLAG_ONSTACK_BIT 0 /* is handler running on signal stack? */
+#define IA64_SC_FLAG_IN_SYSCALL_BIT 1 /* did signal interrupt a syscall? */
+#define IA64_SC_FLAG_FPH_VALID_BIT 2 /* is state in f[32]-f[127] valid? */
+
+#define IA64_SC_FLAG_ONSTACK (1 << IA64_SC_FLAG_ONSTACK_BIT)
+#define IA64_SC_FLAG_IN_SYSCALL (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
+#define IA64_SC_FLAG_FPH_VALID (1 << IA64_SC_FLAG_FPH_VALID_BIT)
+
+# ifndef __ASSEMBLY__
+
+/*
+ * Note on handling of register backing store: sc_ar_bsp contains the address that would
+ * be found in ar.bsp after executing a "cover" instruction in the context in which the
+ * signal was raised. If signal delivery required switching to an alternate signal stack
+ * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
+ * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
+ * original one. In this case, sc_rbs_base contains the base address of the new register
+ * backing store. The number of registers in the dirty partition can be calculated as:
+ *
+ * ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
+ *
+ */
+
+struct sigcontext {
+ unsigned long sc_flags; /* see manifest constants above */
+ unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */
+ stack_t sc_stack; /* previously active stack */
+
+ unsigned long sc_ip; /* instruction pointer */
+ unsigned long sc_cfm; /* current frame marker */
+ unsigned long sc_um; /* user mask bits */
+ unsigned long sc_ar_rsc; /* register stack configuration register */
+ unsigned long sc_ar_bsp; /* backing store pointer */
+ unsigned long sc_ar_rnat; /* RSE NaT collection register */
+ unsigned long sc_ar_ccv; /* compare and exchange compare value register */
+ unsigned long sc_ar_unat; /* ar.unat of interrupted context */
+ unsigned long sc_ar_fpsr; /* floating-point status register */
+ unsigned long sc_ar_pfs; /* previous function state */
+ unsigned long sc_ar_lc; /* loop count register */
+ unsigned long sc_pr; /* predicate registers */
+ unsigned long sc_br[8]; /* branch registers */
+ /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
+ unsigned long sc_gr[32]; /* general registers (static partition) */
+ struct ia64_fpreg sc_fr[128]; /* floating-point registers */
+
+ unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
+ unsigned long sc_loadrs; /* see description above */
+
+ unsigned long sc_ar25; /* cmp8xchg16 uses this */
+ unsigned long sc_ar26; /* rsvd for scratch use */
+ unsigned long sc_rsvd[12]; /* reserved for future use */
+ /*
+ * The mask must come last so we can increase _NSIG_WORDS
+ * without breaking binary compatibility.
+ */
+ sigset_t sc_mask; /* signal mask to restore after handler returns */
+};
+
+# endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGCONTEXT_H */
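The ndirty formula in the comment above translates directly into code. A sketch, assuming an ia64 build where <asm/rse.h> provides ia64_rse_num_regs() and the struct sigcontext comes from a signal handler's context:

#include <asm/rse.h>
#include <asm/sigcontext.h>

/* Number of dirty stacked registers described by a sigcontext, per the
 * formula documented above. Only meaningful when the handler runs on an
 * alternate backing store (sc_rbs_base != 0). */
static unsigned long sc_ndirty(const struct sigcontext *sc)
{
	unsigned long base = sc->sc_rbs_base;

	if (!base)
		return 0;
	/* sc_loadrs >> 16 is the size of the dirty partition in bytes */
	return ia64_rse_num_regs((unsigned long *)base,
				 (unsigned long *)(base + (sc->sc_loadrs >> 16)));
}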
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000..796af1cca
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Based on <asm-i386/siginfo.h>.
+ *
+ * Modified 1998-2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_SIGINFO_H
+#define _UAPI_ASM_IA64_SIGINFO_H
+
+
+#include <asm-generic/siginfo.h>
+
+#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
+#define si_flags _sifields._sigfault._flags
+/*
+ * si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that
+ * si_code is non-zero and __ISR_VALID is set in si_flags.
+ */
+#define si_isr _sifields._sigfault._isr
+
+/*
+ * Flag values for si_flags:
+ */
+#define __ISR_VALID_BIT 0
+#define __ISR_VALID (1 << __ISR_VALID_BIT)
+
+#endif /* _UAPI_ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
new file mode 100644
index 000000000..aa98ff1b9
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Modified 1998-2001, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
+ */
+#ifndef _UAPI_ASM_IA64_SIGNAL_H
+#define _UAPI_ASM_IA64_SIGNAL_H
+
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * The minimum stack size needs to be fairly large because we want to
+ * be sure that an app compiled for today's CPUs will continue to run
+ * on all future CPU models. The CPU model matters because the signal
+ * frame needs to have space for the complete machine state, including
+ * all physical stacked registers. The number of physical stacked
+ * registers is CPU model dependent, but given that the width of
+ * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
+ * more than 16KB of space.
+ */
+#if 1
+ /*
+ * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
+ * in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
+ * incorrect value and fix libc only.
+ */
+# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
+#else
+# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
+#endif
+#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
+
+
+#include <asm-generic/signal-defs.h>
+
+# ifndef __ASSEMBLY__
+
+# include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _UAPI_ASM_IA64_SIGNAL_H */
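The MINSIGSTKSZ/SIGSTKSZ values above feed straight into sigaltstack(). A short sketch of installing an alternate stack and routing SIGSEGV onto it with SA_ONSTACK, using only standard POSIX calls; the handler body and allocation strategy are illustrative assumptions.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig) { _exit(sig); }

/* Install an alternate signal stack sized per this header so the handler
 * survives a blown memory stack. */
static int setup_altstack(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ, .ss_flags = 0 };
	struct sigaction sa;

	if (!ss.ss_sp || sigaltstack(&ss, NULL) == -1)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	return sigaction(SIGSEGV, &sa, NULL);
}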
diff --git a/arch/ia64/include/uapi/asm/stat.h b/arch/ia64/include/uapi/asm/stat.h
new file mode 100644
index 000000000..3265ed5aa
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/stat.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_STAT_H
+#define _ASM_IA64_STAT_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+struct stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long st_blksize;
+ long st_blocks;
+ unsigned long __unused[3];
+};
+
+#define STAT_HAVE_NSEC 1
+
+struct ia64_oldstat {
+ unsigned int st_dev;
+ unsigned int st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int st_rdev;
+ unsigned int __pad1;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+ unsigned int st_blksize;
+ int st_blocks;
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+#endif /* _ASM_IA64_STAT_H */
diff --git a/arch/ia64/include/uapi/asm/statfs.h b/arch/ia64/include/uapi/asm/statfs.h
new file mode 100644
index 000000000..de3bae4f1
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/statfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_STATFS_H
+#define _ASM_IA64_STATFS_H
+
+/*
+ * Based on <asm-i386/statfs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * We need compat_statfs64 to be packed, because the i386 ABI won't
+ * add padding at the end to bring it to a multiple of 8 bytes, but
+ * the IA64 ABI will.
+ */
+#define ARCH_PACK_COMPAT_STATFS64 __attribute__((packed,aligned(4)))
+
+#include <asm-generic/statfs.h>
+
+#endif /* _ASM_IA64_STATFS_H */
diff --git a/arch/ia64/include/uapi/asm/swab.h b/arch/ia64/include/uapi/asm/swab.h
new file mode 100644
index 000000000..79f3fef1a
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/swab.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_SWAB_H
+#define _ASM_IA64_SWAB_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ __u64 result;
+
+ result = ia64_mux1(x, ia64_mux1_rev);
+ return result;
+}
+#define __arch_swab64 __arch_swab64
+
+static __inline__ __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ return __arch_swab64(x) >> 32;
+}
+#define __arch_swab32 __arch_swab32
+
+static __inline__ __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ return __arch_swab64(x) >> 48;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _ASM_IA64_SWAB_H */
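__arch_swab32() and __arch_swab16() above fall out of the 64-bit byte reverse by a simple shift: reversing a zero-extended 32- or 16-bit quantity leaves its bytes at the top of the 64-bit result, so shifting right by 32 or 48 recovers the reversed value. A tiny ia64-only usage sketch (the ia64_mux1 intrinsic exists only on that target, so this does not build elsewhere):

#include <stdio.h>
#include <asm/swab.h>

int main(void)
{
	/* 0x12345678 byte-reversed is 0x78563412; the 16-bit case follows
	 * the same shift-down pattern. */
	printf("%08x -> %08x\n", 0x12345678u, __arch_swab32(0x12345678u));
	printf("%04x -> %04x\n", 0x1234u, (unsigned int)__arch_swab16(0x1234u));
	return 0;
}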
diff --git a/arch/ia64/include/uapi/asm/termbits.h b/arch/ia64/include/uapi/asm/termbits.h
new file mode 100644
index 000000000..000a1a297
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/termbits.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_TERMBITS_H
+#define _ASM_IA64_TERMBITS_H
+
+/*
+ * Based on <asm-i386/termbits.h>.
+ *
+ * Modified 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28 Added new baudrates
+ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+#define EXTPROC 0200000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_IA64_TERMBITS_H */
diff --git a/arch/ia64/include/uapi/asm/termios.h b/arch/ia64/include/uapi/asm/termios.h
new file mode 100644
index 000000000..199742d08
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/termios.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Modified 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28 Added N_IRDA and N_SMSBLOCK
+ */
+#ifndef _UAPI_ASM_IA64_TERMIOS_H
+#define _UAPI_ASM_IA64_TERMIOS_H
+
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+
+#endif /* _UAPI_ASM_IA64_TERMIOS_H */
diff --git a/arch/ia64/include/uapi/asm/types.h b/arch/ia64/include/uapi/asm/types.h
new file mode 100644
index 000000000..2000de474
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/types.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid naming clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#ifndef _UAPI_ASM_IA64_TYPES_H
+#define _UAPI_ASM_IA64_TYPES_H
+
+
+#ifndef __KERNEL__
+#include <asm-generic/int-l64.h>
+#endif
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x) (x)
+# define __IA64_UL_CONST(x) x
+
+#else
+# define __IA64_UL(x) ((unsigned long)(x))
+# define __IA64_UL_CONST(x) x##UL
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_IA64_TYPES_H */
diff --git a/arch/ia64/include/uapi/asm/ucontext.h b/arch/ia64/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000..46f51e535
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ucontext.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_IA64_UCONTEXT_H
+#define _ASM_IA64_UCONTEXT_H
+
+struct ucontext {
+ struct sigcontext uc_mcontext;
+};
+
+#define uc_link uc_mcontext.sc_gr[0] /* wrong type; nobody cares */
+#define uc_sigmask uc_mcontext.sc_sigmask
+#define uc_stack uc_mcontext.sc_stack
+
+#endif /* _ASM_IA64_UCONTEXT_H */
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..013e0bcac
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _UAPI_ASM_IA64_UNISTD_H
+#define _UAPI_ASM_IA64_UNISTD_H
+
+
+#include <asm/break.h>
+
+#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
+
+#define __NR_Linux 1024
+
+#define __NR_umount __NR_umount2
+
+#include <asm/unistd_64.h>
+
+#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/include/uapi/asm/ustack.h b/arch/ia64/include/uapi/asm/ustack.h
new file mode 100644
index 000000000..703cc5f54
--- /dev/null
+++ b/arch/ia64/include/uapi/asm/ustack.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_IA64_USTACK_H
+#define _UAPI_ASM_IA64_USTACK_H
+
+/*
+ * Constants for the user stack size
+ */
+
+
+/* Make a default stack size of 2GiB */
+#define DEFAULT_USER_STACK_SIZE (1UL << 31)
+
+#endif /* _UAPI_ASM_IA64_USTACK_H */